Example usage of org.apache.hadoop.mapred.MiniMRCluster in the Apache Hadoop project: the setup() method of the class TestDelegationToken.
/**
 * Per-test fixture: registers two fake users and boots a single
 * local-file-system MiniMRCluster.
 */
@Before
public void setup() throws Exception {
  // Both test users belong to the "users" group; clone the template so each
  // call receives its own array instance, as the original did.
  final String[] groups = { "users" };
  user1 = UserGroupInformation.createUserForTesting("alice", groups.clone());
  user2 = UserGroupInformation.createUserForTesting("bob", groups.clone());
  cluster = new MiniMRCluster(0, 0, 1, "file:///", 1);
}
Example usage of org.apache.hadoop.mapred.MiniMRCluster in the Apache Hadoop project: the setUpJobTracker() method of the class TestGetGroups.
/**
 * Per-test fixture: starts a local-file-system MiniMRCluster and captures the
 * job configuration it produces.
 */
@Before
public void setUpJobTracker() throws IOException, InterruptedException {
  final MiniMRCluster mini = new MiniMRCluster(0, "file:///", 1);
  cluster = mini;
  conf = mini.createJobConf();
}
Example usage of org.apache.hadoop.mapred.MiniMRCluster in the Apache Hadoop project: the setUp() method of the class TestStreamingStatus.
/**
 * Per-test fixture: brings up a three-tracker MiniMRCluster on the local file
 * system and prepares the input file's file system before the actual test.
 *
 * @throws IOException if the cluster or file system cannot be initialized
 */
@Before
public void setUp() throws IOException {
  conf = new JobConf();
  // Keep finished jobs around and skip persisting job status for the test run.
  conf.setBoolean(JTConfig.JT_RETIREJOBS, false);
  conf.setBoolean(JTConfig.JT_PERSIST_JOBSTATUS, false);
  mr = new MiniMRCluster(1, "file:///", 3, null, null, conf);
  // Resolve the file system that will hold the streaming input file.
  final Path input = new Path(INPUT_FILE);
  fs = input.getFileSystem(mr.createJobConf());
  clean(fs);
  buildExpectedJobOutput();
}
Example usage of org.apache.hadoop.mapred.MiniMRCluster in the Apache Hive project: the setup() method of the class TestHCatMultiOutputFormat.
/**
 * One-time fixture: creates a scratch work directory, starts a Hive metastore
 * server, and boots a single-node MiniMRCluster whose job configuration the
 * tests use.
 */
@BeforeClass
public static void setup() throws Exception {
  System.clearProperty("mapred.job.tracker");
  String testDir = System.getProperty("test.tmp.dir", "./");
  // Mask the sign bit rather than calling Math.abs(): Math.abs(Long.MIN_VALUE)
  // is still negative and would put a '-' into the directory name.
  testDir = testDir + "/test_multitable_" + (new Random().nextLong() & Long.MAX_VALUE) + "/";
  workDir = new File(new File(testDir).getCanonicalPath());
  FileUtil.fullyDelete(workDir);
  // Fail fast instead of silently ignoring a failed directory creation.
  if (!workDir.mkdirs() && !workDir.isDirectory()) {
    throw new IllegalStateException("Could not create work directory: " + workDir);
  }
  warehousedir = new Path(System.getProperty("test.warehouse.dir"));
  HiveConf metastoreConf = new HiveConf();
  metastoreConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehousedir.toString());
  // Run hive metastore server
  msPort = MetaStoreTestUtils.startMetaStoreWithRetry(metastoreConf);
  // Re-read the warehouse dir, which can be changed so multiple MetaStore
  // tests can run against the same server.
  warehousedir = new Path(MetastoreConf.getVar(metastoreConf, MetastoreConf.ConfVars.WAREHOUSE));
  // LocalJobRunner does not work with mapreduce OutputCommitter. So need
  // to use MiniMRCluster. MAPREDUCE-2350
  Configuration conf = new Configuration(true);
  conf.set("yarn.scheduler.capacity.root.queues", "default");
  conf.set("yarn.scheduler.capacity.root.default.capacity", "100");
  FileSystem fs = FileSystem.get(conf);
  System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
  mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf));
  mrConf = mrCluster.createJobConf();
  initializeSetup();
  warehousedir.getFileSystem(conf).mkdirs(warehousedir);
}
Example usage of org.apache.hadoop.mapred.MiniMRCluster in the Apache Hive project: the setup() method of the class TestHCatPartitionPublish.
/**
 * One-time fixture setup. Always brings up a fresh single-node MiniMRCluster;
 * the metastore server, security manager, and metastore client are started
 * only on the first invocation in this JVM (guarded by the static
 * isServerRunning flag).
 */
@BeforeClass
public static void setup() throws Exception {
File workDir = handleWorkDir();
// Route all MR work through the "default" capacity-scheduler queue and map
// the pfile scheme onto the local file system.
conf.set("yarn.scheduler.capacity.root.queues", "default");
conf.set("yarn.scheduler.capacity.root.default.capacity", "100");
conf.set("fs.pfile.impl", "org.apache.hadoop.fs.ProxyLocalFileSystem");
fs = FileSystem.get(conf);
System.setProperty("hadoop.log.dir", new File(workDir, "/logs").getAbsolutePath());
// LocalJobRunner does not work with mapreduce OutputCommitter. So need
// to use MiniMRCluster. MAPREDUCE-2350
mrCluster = new MiniMRCluster(1, fs.getUri().toString(), 1, null, null, new JobConf(conf));
mrConf = mrCluster.createJobConf();
// NOTE(review): everything below runs only on the first invocation; when the
// metastore is already running, hcatConf and msc keep their previous values.
if (isServerRunning) {
return;
}
msPort = MetaStoreTestUtils.startMetaStoreWithRetry();
// NOTE(review): fixed sleep presumably gives the Thrift metastore time to
// come up; confirm whether startMetaStoreWithRetry already guarantees this.
Thread.sleep(10000);
isServerRunning = true;
// Save the current security manager so it can be restored on teardown, then
// install one that (per its name) presumably blocks System.exit() — confirm.
securityManager = System.getSecurityManager();
System.setSecurityManager(new NoExitSecurityManager());
Policy.setPolicy(new DerbyPolicy());
// Point the client-side Hive/HCat configuration at the metastore just
// started on localhost:msPort.
hcatConf = new HiveConf(TestHCatPartitionPublish.class);
hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + msPort);
hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3);
hcatConf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 120, TimeUnit.SECONDS);
hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName());
// Disable pre/post execution hooks and concurrency support for the tests.
hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
msc = new HiveMetaStoreClient(hcatConf);
System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
}
Aggregations