Search in sources:

Example 11 with MiniMRCluster

Use of org.apache.hadoop.mapred.MiniMRCluster in the Apache hadoop project.

From the class TestDelegationToken, method setup().

@Before
public void setup() throws Exception {
    // Create two proxy test users, each a member of the "users" group.
    String[] aliceGroups = new String[] { "users" };
    String[] bobGroups = new String[] { "users" };
    user1 = UserGroupInformation.createUserForTesting("alice", aliceGroups);
    user2 = UserGroupInformation.createUserForTesting("bob", bobGroups);
    // Bring up a minimal MR cluster: ephemeral JT/TT ports, one task
    // tracker, local file system backing, one local directory.
    cluster = new MiniMRCluster(0, 0, 1, "file:///", 1);
}
Also used : MiniMRCluster(org.apache.hadoop.mapred.MiniMRCluster) Before(org.junit.Before)

Example 12 with MiniMRCluster

Use of org.apache.hadoop.mapred.MiniMRCluster in the Apache hadoop project.

From the class TestGetGroups, method setUpJobTracker().

@Before
public void setUpJobTracker() throws IOException, InterruptedException {
    // Start a minimal MR cluster over the local file system with a
    // single task tracker.
    final MiniMRCluster miniCluster = new MiniMRCluster(0, "file:///", 1);
    cluster = miniCluster;
    // Derive a job configuration that points at the running cluster.
    conf = miniCluster.createJobConf();
}
Also used : MiniMRCluster(org.apache.hadoop.mapred.MiniMRCluster) Before(org.junit.Before)

Example 13 with MiniMRCluster

Use of org.apache.hadoop.mapred.MiniMRCluster in the Apache hadoop project.

From the class TestStreamingStatus, method setUp().

/**
 * Starts a small MiniMR cluster and prepares the streaming input file
 * system before each test runs.
 *
 * @throws IOException if the cluster or the file system cannot be set up
 */
@Before
public void setUp() throws IOException {
    conf = new JobConf();
    // Disable job retirement and job-status persistence so completed
    // jobs remain directly inspectable by the tests.
    conf.setBoolean(JTConfig.JT_RETIREJOBS, false);
    conf.setBoolean(JTConfig.JT_PERSIST_JOBSTATUS, false);
    // One task tracker, local file system, three local directories.
    mr = new MiniMRCluster(1, "file:///", 3, null, null, conf);
    // Resolve the file system that will hold the streaming input, then
    // reset it and build the expected output fixture.
    final Path input = new Path(INPUT_FILE);
    fs = input.getFileSystem(mr.createJobConf());
    clean(fs);
    buildExpectedJobOutput();
}
Also used : Path(org.apache.hadoop.fs.Path) JobConf(org.apache.hadoop.mapred.JobConf) MiniMRCluster(org.apache.hadoop.mapred.MiniMRCluster) Before(org.junit.Before)

Example 14 with MiniMRCluster

Use of org.apache.hadoop.mapred.MiniMRCluster in the Apache hive project.

From the class TestHCatMultiOutputFormat, method setup().

/**
 * One-time suite setup: creates a scratch working directory, starts a
 * Hive metastore, and brings up a MiniMRCluster whose job conf the
 * tests will reuse.
 *
 * @throws Exception if the metastore or MR cluster fails to start
 */
@BeforeClass
public static void setup() throws Exception {
    // Clear any leftover job-tracker setting from earlier tests.
    System.clearProperty("mapred.job.tracker");
    // Unique per-run scratch directory under test.tmp.dir; recreated fresh.
    String testDir = System.getProperty("test.tmp.dir", "./");
    testDir = testDir + "/test_multitable_" + Math.abs(new Random().nextLong()) + "/";
    workDir = new File(new File(testDir).getCanonicalPath());
    FileUtil.fullyDelete(workDir);
    workDir.mkdirs();
    warehousedir = new Path(System.getProperty("test.warehouse.dir"));
    HiveConf metastoreConf = new HiveConf();
    metastoreConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehousedir.toString());
    // Run hive metastore server
    msPort = MetaStoreTestUtils.startMetaStoreWithRetry(metastoreConf);
    // Read the warehouse dir, which can be changed so multiple MetaStore tests could be run on
    // the same server
    warehousedir = new Path(MetastoreConf.getVar(metastoreConf, MetastoreConf.ConfVars.WAREHOUSE));
    // LocalJobRunner does not work with mapreduce OutputCommitter. So need
    // to use MiniMRCluster. MAPREDUCE-2350
    Configuration conf = new Configuration(true);
    // Minimal capacity-scheduler queue config for the mini cluster.
    conf.set("yarn.scheduler.capacity.root.queues", "default");
    conf.set("yarn.scheduler.capacity.root.default.capacity", "100");
    FileSystem fs = FileSystem.get(conf);
    // Route Hadoop logs into the scratch directory for this run.
    System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
    mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf));
    mrConf = mrCluster.createJobConf();
    initializeSetup();
    // Ensure the (possibly server-relocated) warehouse path exists.
    warehousedir.getFileSystem(conf).mkdirs(warehousedir);
}
Also used : Path(org.apache.hadoop.fs.Path) Random(java.util.Random) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) HiveConf(org.apache.hadoop.hive.conf.HiveConf) File(java.io.File) MiniMRCluster(org.apache.hadoop.mapred.MiniMRCluster) JobConf(org.apache.hadoop.mapred.JobConf) BeforeClass(org.junit.BeforeClass)

Example 15 with MiniMRCluster

Use of org.apache.hadoop.mapred.MiniMRCluster in the Apache hive project.

From the class TestHCatPartitionPublish, method setup().

/**
 * One-time suite setup: starts a MiniMRCluster, then (first run only)
 * starts a Hive metastore, installs test security/policy managers, and
 * builds the HCatalog client configuration.
 *
 * @throws Exception if the MR cluster or metastore fails to start
 */
@BeforeClass
public static void setup() throws Exception {
    File workDir = handleWorkDir();
    // Minimal capacity-scheduler queue config for the mini cluster.
    conf.set("yarn.scheduler.capacity.root.queues", "default");
    conf.set("yarn.scheduler.capacity.root.default.capacity", "100");
    conf.set("fs.pfile.impl", "org.apache.hadoop.fs.ProxyLocalFileSystem");
    fs = FileSystem.get(conf);
    System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
    // LocalJobRunner does not work with mapreduce OutputCommitter. So need
    // to use MiniMRCluster. MAPREDUCE-2350
    mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf));
    mrConf = mrCluster.createJobConf();
    // The metastore and the client config below are shared across runs;
    // skip them if a server is already up from a previous invocation.
    if (isServerRunning) {
        return;
    }
    msPort = MetaStoreTestUtils.startMetaStoreWithRetry();
    // Give the metastore time to come up before clients connect.
    Thread.sleep(10000);
    isServerRunning = true;
    // Save the current security manager so teardown can restore it; the
    // replacement prevents System.exit from killing the test JVM.
    securityManager = System.getSecurityManager();
    System.setSecurityManager(new NoExitSecurityManager());
    Policy.setPolicy(new DerbyPolicy());
    // Client-side HCatalog configuration pointing at the test metastore.
    hcatConf = new HiveConf(TestHCatPartitionPublish.class);
    hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + msPort);
    hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
    hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3);
    hcatConf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 120, TimeUnit.SECONDS);
    hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
    // Disable pre/post execution hooks and concurrency for the tests.
    hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
    hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
    msc = new HiveMetaStoreClient(hcatConf);
    System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
    System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
}
Also used : DerbyPolicy(org.apache.hive.hcatalog.DerbyPolicy) HiveMetaStoreClient(org.apache.hadoop.hive.metastore.HiveMetaStoreClient) HCatSemanticAnalyzer(org.apache.hive.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer) HiveConf(org.apache.hadoop.hive.conf.HiveConf) NoExitSecurityManager(org.apache.hive.hcatalog.NoExitSecurityManager) File(java.io.File) MiniMRCluster(org.apache.hadoop.mapred.MiniMRCluster) JobConf(org.apache.hadoop.mapred.JobConf) BeforeClass(org.junit.BeforeClass)

Aggregations

MiniMRCluster (org.apache.hadoop.mapred.MiniMRCluster)15 Path (org.apache.hadoop.fs.Path)8 JobConf (org.apache.hadoop.mapred.JobConf)8 Configuration (org.apache.hadoop.conf.Configuration)7 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)7 FileSystem (org.apache.hadoop.fs.FileSystem)6 File (java.io.File)5 Before (org.junit.Before)5 Test (org.junit.Test)4 Utils (org.apache.hadoop.mapred.Utils)3 BeforeClass (org.junit.BeforeClass)3 BufferedReader (java.io.BufferedReader)2 DataOutputStream (java.io.DataOutputStream)2 IOException (java.io.IOException)2 InputStreamReader (java.io.InputStreamReader)2 InetAddress (java.net.InetAddress)2 ArrayList (java.util.ArrayList)2 Map (java.util.Map)2 HiveConf (org.apache.hadoop.hive.conf.HiveConf)2 FileOutputStream (java.io.FileOutputStream)1