Example 1 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in the Apache Hadoop project.

From the class ITUseMiniCluster, method clusterUp:

@Before
public void clusterUp() throws IOException {
    final Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Configuration (org.apache.hadoop.conf.Configuration), Before (org.junit.Before)
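
For context, a hedged sketch of the full test-class shape this fixture implies: the @Before method above paired with an @After teardown and a simple round-trip test. The class name, teardown method, and test body here are illustrative assumptions, not taken from the Hadoop source.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterRoundTripTest {

    private MiniDFSCluster cluster;

    @Before
    public void clusterUp() throws IOException {
        final Configuration conf = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
    }

    @After
    public void clusterDown() {
        if (cluster != null) {
            // Release the cluster's ports and temporary storage directories.
            cluster.shutdown();
            cluster = null;
        }
    }

    @Test
    public void testRoundTrip() throws IOException {
        final Path path = new Path("/hello.txt");
        final FileSystem fs = cluster.getFileSystem();
        // Write a small payload, then read it back through the same cluster.
        try (FSDataOutputStream out = fs.create(path)) {
            out.writeUTF("hello");
        }
        try (FSDataInputStream in = fs.open(path)) {
            Assert.assertEquals("hello", in.readUTF());
        }
    }
}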

Example 2 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in the Apache Hadoop project.

From the class StorageLocation, method makeBlockPoolDir:

/**
   * Create physical directory for block pools on the data node.
   *
   * @param blockPoolID
   *          the block pool id
   * @param conf
   *          Configuration instance to use.
   * @throws IOException on errors
   */
public void makeBlockPoolDir(String blockPoolID, Configuration conf) throws IOException {
    if (conf == null) {
        conf = new HdfsConfiguration();
    }
    LocalFileSystem localFS = FileSystem.getLocal(conf);
    FsPermission permission = new FsPermission(conf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT));
    File data = new File(getBpURI(blockPoolID, Storage.STORAGE_DIR_CURRENT));
    try {
        DiskChecker.checkDir(localFS, new Path(data.toURI()), permission);
    } catch (IOException e) {
        DataStorage.LOG.warn("Invalid directory in: " + data.getCanonicalPath() + ": " + e.getMessage());
    }
}
Also used: Path (org.apache.hadoop.fs.Path), LocalFileSystem (org.apache.hadoop.fs.LocalFileSystem), FsPermission (org.apache.hadoop.fs.permission.FsPermission), IOException (java.io.IOException), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), File (java.io.File)
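
A hedged usage sketch: a caller would typically tighten the data-directory permission key before invoking makeBlockPoolDir; passing null for conf falls back to a fresh HdfsConfiguration, as the method above shows. The storage path and block pool id below are illustrative placeholders, not values from the Hadoop source.

// Obtain a StorageLocation for a local data directory (path is illustrative).
StorageLocation location = StorageLocation.parse("/data/dfs/dn");

// Restrict the block pool directory to the datanode user before creating it.
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "700");

// Creates the block pool's "current" directory under this storage location.
location.makeBlockPoolDir("BP-1234567890-127.0.0.1-1000000000000", conf);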

Example 3 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in the Apache Hadoop project.

From the class DataNode, method instantiateDataNode:

/** Instantiate a single datanode object, along with its secure resources.
   * It must be started by subsequently invoking
   * {@link DataNode#runDatanodeDaemon()}.
   */
public static DataNode instantiateDataNode(String[] args, Configuration conf, SecureResources resources) throws IOException {
    if (conf == null)
        conf = new HdfsConfiguration();
    if (args != null) {
        // parse generic hadoop options
        GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
        args = hParser.getRemainingArgs();
    }
    if (!parseArguments(args, conf)) {
        printUsage(System.err);
        return null;
    }
    Collection<StorageLocation> dataLocations = getStorageLocations(conf);
    UserGroupInformation.setConfiguration(conf);
    SecurityUtil.login(conf, DFS_DATANODE_KEYTAB_FILE_KEY, DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, getHostName(conf));
    return makeInstance(dataLocations, conf, resources);
}
Also used: HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), GenericOptionsParser (org.apache.hadoop.util.GenericOptionsParser)
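
The javadoc above requires runDatanodeDaemon() to be invoked after instantiation. A minimal sketch of that pairing, mirroring what DataNode.createDataNode does in the same class; the wrapper name createAndRun is hypothetical, and null secure resources assume a non-secure setup.

public static DataNode createAndRun(String[] args) throws IOException {
    // Instantiate; returns null when argument parsing fails and usage was printed.
    DataNode dn = DataNode.instantiateDataNode(args, null, null);
    if (dn != null) {
        // Start the datanode's service threads.
        dn.runDatanodeDaemon();
    }
    return dn;
}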

Example 4 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in the Apache Hadoop project.

From the class NameNode, method createNameNode:

public static NameNode createNameNode(String[] argv, Configuration conf) throws IOException {
    LOG.info("createNameNode " + Arrays.asList(argv));
    if (conf == null)
        conf = new HdfsConfiguration();
    // Parse out some generic args into Configuration.
    GenericOptionsParser hParser = new GenericOptionsParser(conf, argv);
    argv = hParser.getRemainingArgs();
    // Parse the rest, NN specific args.
    StartupOption startOpt = parseArguments(argv);
    if (startOpt == null) {
        printUsage(System.err);
        return null;
    }
    setStartupOption(conf, startOpt);
    boolean aborted = false;
    switch(startOpt) {
        case FORMAT:
            aborted = format(conf, startOpt.getForceFormat(), startOpt.getInteractiveFormat());
            terminate(aborted ? 1 : 0);
            // avoid javac warning
            return null;
        case GENCLUSTERID:
            System.err.println("Generating new cluster id:");
            System.out.println(NNStorage.newClusterID());
            terminate(0);
            return null;
        case ROLLBACK:
            aborted = doRollback(conf, true);
            terminate(aborted ? 1 : 0);
            // avoid warning
            return null;
        case BOOTSTRAPSTANDBY:
            String[] toolArgs = Arrays.copyOfRange(argv, 1, argv.length);
            int rc = BootstrapStandby.run(toolArgs, conf);
            terminate(rc);
            // avoid warning
            return null;
        case INITIALIZESHAREDEDITS:
            aborted = initializeSharedEdits(conf, startOpt.getForceFormat(), startOpt.getInteractiveFormat());
            terminate(aborted ? 1 : 0);
            // avoid warning
            return null;
        case BACKUP:
        case CHECKPOINT:
            NamenodeRole role = startOpt.toNodeRole();
            DefaultMetricsSystem.initialize(role.toString().replace(" ", ""));
            return new BackupNode(conf, role);
        case RECOVER:
            NameNode.doRecovery(startOpt, conf);
            return null;
        case METADATAVERSION:
            printMetadataVersion(conf);
            terminate(0);
            // avoid javac warning
            return null;
        case UPGRADEONLY:
            DefaultMetricsSystem.initialize("NameNode");
            new NameNode(conf);
            terminate(0);
            return null;
        default:
            DefaultMetricsSystem.initialize("NameNode");
            return new NameNode(conf);
    }
}
Also used: StartupOption (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption), RollingUpgradeStartupOption (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), GenericOptionsParser (org.apache.hadoop.util.GenericOptionsParser), NamenodeRole (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole)
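
A hedged sketch of typical entry-point usage. Note that createNameNode returns null for one-shot options such as FORMAT or GENCLUSTERID, which call terminate() themselves, so a caller must check for null before joining. This is a simplified version of how NameNode.main drives the method, not the full implementation.

public static void main(String[] argv) throws IOException {
    // A null Configuration makes createNameNode build a fresh HdfsConfiguration.
    NameNode namenode = NameNode.createNameNode(argv, null);
    if (namenode != null) {
        // Block until the NameNode shuts down.
        namenode.join();
    }
}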

Example 5 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in the Apache Hadoop project.

From the class TestBlockReaderLocal, method testStatistics:

private void testStatistics(boolean isShortCircuit) throws Exception {
    Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
    HdfsConfiguration conf = new HdfsConfiguration();
    TemporarySocketDirectory sockDir = null;
    if (isShortCircuit) {
        DFSInputStream.tcpReadsDisabledForTesting = true;
        sockDir = new TemporarySocketDirectory();
        conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(), "TestStatisticsForLocalRead.%d.sock").getAbsolutePath());
        conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
        DomainSocket.disableBindPathValidation();
    } else {
        conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
    }
    MiniDFSCluster cluster = null;
    final Path TEST_PATH = new Path("/a");
    final long RANDOM_SEED = 4567L;
    FSDataInputStream fsIn = null;
    byte[] original = new byte[BlockReaderLocalTest.TEST_LENGTH];
    FileSystem fs = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).hosts(new String[] { NetUtils.getLocalHostname() }).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, TEST_PATH, BlockReaderLocalTest.TEST_LENGTH, (short) 1, RANDOM_SEED);
        try {
            DFSTestUtil.waitReplication(fs, TEST_PATH, (short) 1);
        } catch (InterruptedException e) {
            Assert.fail("unexpected InterruptedException during " + "waitReplication: " + e);
        } catch (TimeoutException e) {
            Assert.fail("unexpected TimeoutException during " + "waitReplication: " + e);
        }
        fsIn = fs.open(TEST_PATH);
        IOUtils.readFully(fsIn, original, 0, BlockReaderLocalTest.TEST_LENGTH);
        HdfsDataInputStream dfsIn = (HdfsDataInputStream) fsIn;
        Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH, dfsIn.getReadStatistics().getTotalBytesRead());
        Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH, dfsIn.getReadStatistics().getTotalLocalBytesRead());
        if (isShortCircuit) {
            Assert.assertEquals(BlockReaderLocalTest.TEST_LENGTH, dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
        } else {
            Assert.assertEquals(0, dfsIn.getReadStatistics().getTotalShortCircuitBytesRead());
        }
        fsIn.close();
        fsIn = null;
    } finally {
        DFSInputStream.tcpReadsDisabledForTesting = false;
        if (fsIn != null)
            fsIn.close();
        if (fs != null)
            fs.close();
        if (cluster != null)
            cluster.shutdown();
        if (sockDir != null)
            sockDir.close();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), TemporarySocketDirectory (org.apache.hadoop.net.unix.TemporarySocketDirectory), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File), HdfsDataInputStream (org.apache.hadoop.hdfs.client.HdfsDataInputStream), TimeoutException (java.util.concurrent.TimeoutException)
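
A private helper like this is normally driven by small @Test wrappers that toggle the short-circuit flag, one per read path; a hedged sketch of that pattern follows (the wrapper names are assumptions, not verified against the test class).

@Test
public void testStatisticsForLocalRead() throws Exception {
    // TCP path: reads count as local but not as short-circuit.
    testStatistics(false);
}

@Test
public void testStatisticsForShortCircuitLocalRead() throws Exception {
    // Domain-socket path: reads count toward short-circuit statistics as well.
    testStatistics(true);
}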

Aggregations

Class (package): usage count

HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 454
Configuration (org.apache.hadoop.conf.Configuration): 311
Test (org.junit.Test): 311
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 267
Path (org.apache.hadoop.fs.Path): 152
FileSystem (org.apache.hadoop.fs.FileSystem): 94
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 92
File (java.io.File): 72
IOException (java.io.IOException): 69
Before (org.junit.Before): 56
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 40
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 35
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 33
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 30
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 27
RandomAccessFile (java.io.RandomAccessFile): 22
ArrayList (java.util.ArrayList): 20
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile): 20
URI (java.net.URI): 19
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 19