Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
The class TestHiveCopyFiles, method setUp.
@BeforeClass
public static void setUp() {
    hiveConf = new HiveConf(TestHiveCopyFiles.class);
    SessionState.start(hiveConf);
}
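For context, a minimal sketch of how a test method could confirm that the session started in setUp is active. This method is hypothetical and not part of TestHiveCopyFiles, but SessionState.get() and getConf() are standard Hive session APIs:
@Test
public void sessionIsStarted() {
    // Hypothetical check: the session started in setUp() should be visible on
    // this thread and carry the HiveConf passed to SessionState.start().
    Assert.assertNotNull(SessionState.get());
    Assert.assertSame(hiveConf, SessionState.get().getConf());
}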
Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
The class TestZookeeperLockManager, method setup.
@Before
public void setup() {
    conf = new HiveConf();
    lockObjData = new HiveLockObjectData("1", "10", "SHARED", "show tables");
    hiveLock = new HiveLockObject(TABLE, lockObjData);
    zLock = new ZooKeeperHiveLock(TABLE_LOCK_PATH, hiveLock, HiveLockMode.SHARED);
    // Retry until the embedded ZooKeeper TestingServer binds to a free port.
    while (server == null) {
        try {
            server = new TestingServer();
            CuratorFrameworkFactory.Builder builder = CuratorFrameworkFactory.builder();
            client = builder.connectString(server.getConnectString()).retryPolicy(new RetryOneTime(1)).build();
            client.start();
        } catch (Exception e) {
            System.err.println("Getting bind exception - retrying to allocate server");
            server = null;
        }
    }
}
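The matching teardown is not shown in this snippet; a plausible sketch (assumed, not copied from TestZookeeperLockManager) closes the Curator client first and then the embedded TestingServer:
@After
public void teardown() throws Exception {
    // Assumed cleanup: close the client before the ZooKeeper server it points at.
    if (client != null) {
        client.close();
    }
    if (server != null) {
        server.close();
    }
}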
Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
The class TestHive, method setUp.
@Override
protected void setUp() throws Exception {
    super.setUp();
    hiveConf = new HiveConf(this.getClass());
    hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
    // enable trash so it can be tested
    // FS_TRASH_CHECKPOINT_INTERVAL_KEY (hadoop-2)
    hiveConf.setFloat("fs.trash.checkpoint.interval", 30);
    // FS_TRASH_INTERVAL_KEY (hadoop-2)
    hiveConf.setFloat("fs.trash.interval", 30);
    SessionState.start(hiveConf);
    try {
        hm = Hive.get(hiveConf);
    } catch (Exception e) {
        System.err.println(StringUtils.stringifyException(e));
        System.err.println("Unable to initialize Hive Metastore using configuration: \n " + hiveConf);
        throw e;
    }
}
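As an illustrative check of the trash settings above (not part of the original TestHive), the configured values can be read back with the standard Hadoop Configuration getters:
// Both trash keys were set to 30 above; 0f is only the fallback default.
Assert.assertEquals(30f, hiveConf.getFloat("fs.trash.interval", 0f), 0.001f);
Assert.assertEquals(30f, hiveConf.getFloat("fs.trash.checkpoint.interval", 0f), 0.001f);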
Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
The class TestWorker, method inputSplit.
@Test
public void inputSplit() throws Exception {
    String basename = "/warehouse/foo/base_1";
    String delta1 = "/warehouse/foo/delta_2_3";
    String delta2 = "/warehouse/foo/delta_4_7";
    HiveConf conf = new HiveConf();
    Path file = new Path(System.getProperty("java.io.tmpdir") + System.getProperty("file.separator") + "newWriteInputSplitTest");
    FileSystem fs = FileSystem.get(conf);
    FSDataOutputStream os = fs.create(file);
    for (int i = 0; i < 10; i++) {
        os.writeBytes("mary had a little lamb its fleece was white as snow\n");
    }
    os.close();
    List<Path> files = new ArrayList<Path>(1);
    files.add(file);
    Path[] deltas = new Path[2];
    deltas[0] = new Path(delta1);
    deltas[1] = new Path(delta2);
    CompactorMR.CompactorInputSplit split = new CompactorMR.CompactorInputSplit(conf, 3, files, new Path(basename), deltas);
    // 10 lines of 52 bytes each were written above, so the split covers 520 bytes.
    Assert.assertEquals(520L, split.getLength());
    String[] locations = split.getLocations();
    Assert.assertEquals(1, locations.length);
    Assert.assertEquals("localhost", locations[0]);
    // Round-trip the split through its Writable serialization and verify the fields survive.
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutput out = new DataOutputStream(buf);
    split.write(out);
    split = new CompactorMR.CompactorInputSplit();
    DataInput in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
    split.readFields(in);
    Assert.assertEquals(3, split.getBucket());
    Assert.assertEquals(basename, split.getBaseDir().toString());
    deltas = split.getDeltaDirs();
    Assert.assertEquals(2, deltas.length);
    Assert.assertEquals(delta1, deltas[0].toString());
    Assert.assertEquals(delta2, deltas[1].toString());
}
Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
The class TestWorker, method inputSplitNullBase.
@Test
public void inputSplitNullBase() throws Exception {
    String delta1 = "/warehouse/foo/delta_2_3";
    String delta2 = "/warehouse/foo/delta_4_7";
    HiveConf conf = new HiveConf();
    Path file = new Path(System.getProperty("java.io.tmpdir") + System.getProperty("file.separator") + "newWriteInputSplitTest");
    FileSystem fs = FileSystem.get(conf);
    FSDataOutputStream os = fs.create(file);
    for (int i = 0; i < 10; i++) {
        os.writeBytes("mary had a little lamb its fleece was white as snow\n");
    }
    os.close();
    List<Path> files = new ArrayList<Path>(1);
    files.add(file);
    Path[] deltas = new Path[2];
    deltas[0] = new Path(delta1);
    deltas[1] = new Path(delta2);
    CompactorMR.CompactorInputSplit split = new CompactorMR.CompactorInputSplit(conf, 3, files, null, deltas);
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutput out = new DataOutputStream(buf);
    split.write(out);
    split = new CompactorMR.CompactorInputSplit();
    DataInput in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
    split.readFields(in);
    Assert.assertEquals(3, split.getBucket());
    Assert.assertNull(split.getBaseDir());
    deltas = split.getDeltaDirs();
    Assert.assertEquals(2, deltas.length);
    Assert.assertEquals(delta1, deltas[0].toString());
    Assert.assertEquals(delta2, deltas[1].toString());
}