Search in sources :

Example 76 with BeforeClass

Use of org.junit.BeforeClass in project hadoop by apache.

In the class TestBlockReaderLocal, the method init:

@BeforeClass
public static void init() {
    // One-time setup for the whole test class: create a scratch directory
    // to hold UNIX domain socket files used by the local block reader.
    sockDir = new TemporarySocketDirectory();
    // NOTE(review): presumably disables DomainSocket's bind-path permission
    // checks so sockets can live under the temp dir — confirm against
    // DomainSocket's javadoc.
    DomainSocket.disableBindPathValidation();
}
Also used : TemporarySocketDirectory(org.apache.hadoop.net.unix.TemporarySocketDirectory) BeforeClass(org.junit.BeforeClass)

Example 77 with BeforeClass

Use of org.junit.BeforeClass in project hadoop by apache.

In the class TestOfflineImageViewer, the method createOriginalFSImage:

// Create a populated namespace for later testing. Save its contents to a
// data structure and store its fsimage location.
// We only want to generate the fsimage file once and use it for
// multiple tests.
@BeforeClass
public static void createOriginalFSImage() throws IOException {
    tempDir = Files.createTempDir();
    MiniDFSCluster cluster = null;
    try {
        final ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager.getPolicyByID(HdfsConstants.XOR_2_1_POLICY_ID);
        Configuration conf = new Configuration();
        // Short delegation-token lifetimes so token ops appear in the image.
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
        conf.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL, "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
        conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, ecPolicy.getName());
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        DistributedFileSystem hdfs = cluster.getFileSystem();
        // Create a reasonable namespace
        for (int i = 0; i < NUM_DIRS; i++, dirCount++) {
            Path dir = new Path("/dir" + i);
            hdfs.mkdirs(dir);
            writtenFiles.put(dir.toString(), pathToFileEntry(hdfs, dir.toString()));
            for (int j = 0; j < FILES_PER_DIR; j++) {
                Path file = new Path(dir, "file" + j);
                FSDataOutputStream o = hdfs.create(file);
                o.write(23);
                o.close();
                writtenFiles.put(file.toString(), pathToFileEntry(hdfs, file.toString()));
            }
        }
        // Create an empty directory
        Path emptydir = new Path("/emptydir");
        hdfs.mkdirs(emptydir);
        dirCount++;
        writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));
        //Create a directory whose name should be escaped in XML
        Path invalidXMLDir = new Path("/dirContainingInvalidXMLCharhere");
        hdfs.mkdirs(invalidXMLDir);
        dirCount++;
        //Create a directory with sticky bits
        Path stickyBitDir = new Path("/stickyBit");
        hdfs.mkdirs(stickyBitDir);
        hdfs.setPermission(stickyBitDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true));
        dirCount++;
        writtenFiles.put(stickyBitDir.toString(), hdfs.getFileStatus(stickyBitDir));
        // Get delegation tokens so we log the delegation token op
        Token<?>[] delegationTokens = hdfs.addDelegationTokens(TEST_RENEWER, null);
        for (Token<?> t : delegationTokens) {
            LOG.debug("got token " + t);
        }
        // Create INodeReference
        final Path src = new Path("/src");
        hdfs.mkdirs(src);
        dirCount++;
        writtenFiles.put(src.toString(), hdfs.getFileStatus(src));
        // Create snapshot and snapshotDiff.
        final Path orig = new Path("/src/orig");
        hdfs.mkdirs(orig);
        final Path file1 = new Path("/src/file");
        FSDataOutputStream o = hdfs.create(file1);
        o.write(23);
        o.write(45);
        o.close();
        hdfs.allowSnapshot(src);
        hdfs.createSnapshot(src, "snapshot");
        final Path dst = new Path("/dst");
        // Rename a directory in the snapshot directory to add snapshotCopy
        // field to the dirDiff entry.
        hdfs.rename(orig, dst);
        dirCount++;
        writtenFiles.put(dst.toString(), hdfs.getFileStatus(dst));
        // Truncate a file in the snapshot directory to add snapshotCopy and
        // blocks fields to the fileDiff entry.
        hdfs.truncate(file1, 1);
        writtenFiles.put(file1.toString(), hdfs.getFileStatus(file1));
        // Set XAttrs so the fsimage contains XAttr ops
        final Path xattr = new Path("/xattr");
        hdfs.mkdirs(xattr);
        dirCount++;
        hdfs.setXAttr(xattr, "user.a1", new byte[] { 0x31, 0x32, 0x33 });
        hdfs.setXAttr(xattr, "user.a2", new byte[] { 0x37, 0x38, 0x39 });
        // OIV should be able to handle empty value XAttrs
        hdfs.setXAttr(xattr, "user.a3", null);
        // OIV should be able to handle XAttr values that can't be expressed
        // as UTF8
        hdfs.setXAttr(xattr, "user.a4", new byte[] { -0x3d, 0x28 });
        writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));
        // Set ACLs
        hdfs.setAcl(xattr, Lists.newArrayList(aclEntry(ACCESS, USER, ALL), aclEntry(ACCESS, USER, "foo", ALL), aclEntry(ACCESS, GROUP, READ_EXECUTE), aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE), aclEntry(ACCESS, OTHER, EXECUTE)));
        // Create an Erasure Coded dir
        Path ecDir = new Path("/ec");
        hdfs.mkdirs(ecDir);
        dirCount++;
        hdfs.getClient().setErasureCodingPolicy(ecDir.toString(), ecPolicy.getName());
        writtenFiles.put(ecDir.toString(), hdfs.getFileStatus(ecDir));
        // Create an empty Erasure Coded file
        Path emptyECFile = new Path(ecDir, "EmptyECFile.txt");
        hdfs.create(emptyECFile).close();
        writtenFiles.put(emptyECFile.toString(), pathToFileEntry(hdfs, emptyECFile.toString()));
        filesECCount++;
        // Create a small Erasure Coded file
        Path smallECFile = new Path(ecDir, "SmallECFile.txt");
        FSDataOutputStream out = hdfs.create(smallECFile);
        Random r = new Random();
        byte[] bytes = new byte[1024 * 10];
        r.nextBytes(bytes);
        out.write(bytes);
        // FIX: close the stream before recording the file entry and saving
        // the namespace. Previously the stream was never closed (resource
        // leak), so the file's length/blocks might not have been finalized
        // in the captured fsimage.
        out.close();
        writtenFiles.put(smallECFile.toString(), pathToFileEntry(hdfs, smallECFile.toString()));
        filesECCount++;
        // Write results to the fsimage file
        hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
        hdfs.saveNamespace();
        hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
        // Determine location of fsimage file
        originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
        if (originalFsimage == null) {
            throw new RuntimeException("Didn't generate or can't find fsimage");
        }
        LOG.debug("original FS image file is " + originalFsimage);
    } finally {
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) ErasureCodingPolicy(org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy) Token(org.apache.hadoop.security.token.Token) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Random(java.util.Random) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) FsPermission(org.apache.hadoop.fs.permission.FsPermission) BeforeClass(org.junit.BeforeClass)

Example 78 with BeforeClass

Use of org.junit.BeforeClass in project hadoop by apache.

In the class TestOfflineImageViewerForAcl, the method createOriginalFSImage:

/**
 * Builds a namespace exercising the various ACL shapes, records each path's
 * {@code AclStatus} for later comparison, and saves an fsimage containing it.
 * The image is generated once and shared by all tests in this class.
 */
@BeforeClass
public static void createOriginalFSImage() throws IOException {
    MiniDFSCluster cluster = null;
    try {
        Configuration conf = new Configuration();
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
        cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        // Directory with no ACL entries at all.
        Path dirNoAcl = new Path("/dirWithNoAcl");
        fs.mkdirs(dirNoAcl);
        writtenAcls.put(dirNoAcl.toString(), fs.getAclStatus(dirNoAcl));
        // Directory carrying only default (inherited-by-children) entries.
        Path dirDefaultAcl = new Path("/dirWithDefaultAcl");
        fs.mkdirs(dirDefaultAcl);
        fs.setAcl(dirDefaultAcl, Lists.newArrayList(aclEntry(DEFAULT, USER, ALL), aclEntry(DEFAULT, USER, "foo", ALL), aclEntry(DEFAULT, GROUP, READ_EXECUTE), aclEntry(DEFAULT, OTHER, NONE)));
        writtenAcls.put(dirDefaultAcl.toString(), fs.getAclStatus(dirDefaultAcl));
        // File with no explicit ACL.
        Path plainFile = new Path("/noAcl");
        writeOneByteFile(fs, plainFile);
        writtenAcls.put(plainFile.toString(), fs.getAclStatus(plainFile));
        // File with a small access ACL.
        Path aclFile = new Path("/withAcl");
        writeOneByteFile(fs, aclFile);
        fs.setAcl(aclFile, Lists.newArrayList(aclEntry(ACCESS, USER, READ_WRITE), aclEntry(ACCESS, USER, "foo", READ), aclEntry(ACCESS, GROUP, READ), aclEntry(ACCESS, OTHER, NONE)));
        writtenAcls.put(aclFile.toString(), fs.getAclStatus(aclFile));
        // File with multiple named-user and named-group entries.
        Path multiAclFile = new Path("/withSeveralAcls");
        writeOneByteFile(fs, multiAclFile);
        fs.setAcl(multiAclFile, Lists.newArrayList(aclEntry(ACCESS, USER, READ_WRITE), aclEntry(ACCESS, USER, "foo", READ_WRITE), aclEntry(ACCESS, USER, "bar", READ), aclEntry(ACCESS, GROUP, READ), aclEntry(ACCESS, GROUP, "group", READ), aclEntry(ACCESS, OTHER, NONE)));
        writtenAcls.put(multiAclFile.toString(), fs.getAclStatus(multiAclFile));
        // Freeze the namespace and persist it to an fsimage file.
        fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
        fs.saveNamespace();
        // Locate the image just written so the tests can read it back.
        originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
        if (originalFsimage == null) {
            throw new RuntimeException("Didn't generate or can't find fsimage");
        }
        LOG.debug("original FS image file is " + originalFsimage);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}

/** Creates {@code p} and writes a single byte (23) into it, then closes it. */
private static void writeOneByteFile(DistributedFileSystem fs, Path p) throws IOException {
    FSDataOutputStream stream = fs.create(p);
    stream.write(23);
    stream.close();
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) BeforeClass(org.junit.BeforeClass)

Example 79 with BeforeClass

Use of org.junit.BeforeClass in project hadoop by apache.

In the class TestFSMainOperationsWebHdfs, the method setupCluster:

@BeforeClass
public static void setupCluster() {
    // Use a tiny block size so multi-block files are cheap to create.
    final Configuration config = new Configuration();
    config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
    try {
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(2).build();
        cluster.waitActive();
        // Open the root directory (mode 777) so the non-superuser created
        // below can write anywhere during the tests.
        cluster.getFileSystem().setPermission(new Path("/"), new FsPermission((short) 0777));
        final String webhdfsUri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
        // Derive a fresh non-superuser identity from the current user and
        // obtain the WebHDFS filesystem as that user.
        final UserGroupInformation realUser = UserGroupInformation.getCurrentUser();
        final UserGroupInformation testUser = UserGroupInformation.createUserForTesting(realUser.getShortUserName() + "x", new String[] { "user" });
        fileSystem = testUser.doAs(new PrivilegedExceptionAction<FileSystem>() {

            @Override
            public FileSystem run() throws Exception {
                return FileSystem.get(new URI(webhdfsUri), config);
            }
        });
        defaultWorkingDirectory = fileSystem.getWorkingDirectory();
    } catch (Exception e) {
        // Any setup failure aborts the whole class; surface it unchecked.
        throw new RuntimeException(e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) FsPermission(org.apache.hadoop.fs.permission.FsPermission) PrivilegedExceptionAction(java.security.PrivilegedExceptionAction) URI(java.net.URI) IOException(java.io.IOException) AccessControlException(org.apache.hadoop.security.AccessControlException) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) BeforeClass(org.junit.BeforeClass)

Example 80 with BeforeClass

Use of org.junit.BeforeClass in project hadoop by apache.

In the class TestHttpsFileSystem, the method setUp:

@BeforeClass
public static void setUp() throws Exception {
    conf = new Configuration();
    // Force HTTPS-only transport and let the OS assign free ports.
    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
    conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
    // Start from an empty base directory for the generated keystores.
    File baseDir = new File(BASEDIR);
    FileUtil.fullyDelete(baseDir);
    baseDir.mkdirs();
    keystoresDir = baseDir.getAbsolutePath();
    sslConfDir = KeyStoreTestUtil.getClasspathDir(TestHttpsFileSystem.class);
    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
    conf.set(DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY, KeyStoreTestUtil.getClientSSLConfigFileName());
    conf.set(DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY, KeyStoreTestUtil.getServerSSLConfigFileName());
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    // Seed the filesystem with one tiny file for the read tests.
    OutputStream stream = cluster.getFileSystem().create(new Path("/test"));
    stream.write(23);
    stream.close();
    // Record the NameNode's actual HTTPS endpoint (port was auto-assigned
    // above) so tests can connect to it.
    InetSocketAddress httpsAddress = cluster.getNameNode().getHttpsAddress();
    nnAddr = NetUtils.getHostPortString(httpsAddress);
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, nnAddr);
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) InetSocketAddress(java.net.InetSocketAddress) OutputStream(java.io.OutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) File(java.io.File) BeforeClass(org.junit.BeforeClass)

Aggregations

BeforeClass (org.junit.BeforeClass)2813 File (java.io.File)388 Configuration (org.apache.hadoop.conf.Configuration)287 IOException (java.io.IOException)128 Connection (java.sql.Connection)126 Properties (java.util.Properties)108 Reader (java.io.Reader)99 SqlSessionFactoryBuilder (org.apache.ibatis.session.SqlSessionFactoryBuilder)98 Provisioning (com.zimbra.cs.account.Provisioning)93 ScriptRunner (org.apache.ibatis.jdbc.ScriptRunner)91 HiveConf (org.apache.hadoop.hive.conf.HiveConf)86 MockProvisioning (com.zimbra.cs.account.MockProvisioning)77 Path (org.apache.hadoop.fs.Path)75 URI (java.net.URI)73 HashMap (java.util.HashMap)70 URL (java.net.URL)63 SqlSession (org.apache.ibatis.session.SqlSession)62 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)61 Injector (com.google.inject.Injector)57 CConfiguration (co.cask.cdap.common.conf.CConfiguration)56