use of org.apache.hadoop.hive.ql.processors.CommandProcessorException in project hive by apache.
the class HCatDriver method run.
public CommandProcessorResponse run(String command) throws CommandProcessorException {
  SessionState ss = SessionState.get();
  CommandProcessorResponse cpr = null;
  try {
    cpr = driver.run(command);
  } finally {
    // reset conf vars
    ss.getConf().set(HCatConstants.HCAT_CREATE_DB_NAME, "");
    ss.getConf().set(HCatConstants.HCAT_CREATE_TBL_NAME, "");
  }
  // Only attempt to do this, if cmd was successful.
  // FIXME: it would be probably better to move this to an after-execution
  int rc = setFSPermsNGrp(ss, driver.getConf());
  if (rc != 0) {
    throw new CommandProcessorException(rc);
  }
  return cpr;
}
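For context, a minimal caller-side sketch, hypothetical and not taken from the Hive sources above, assuming only the driver.run() and getResponseCode() APIs shown in these snippets, of how a failed run is typically translated back into a numeric status:
// Hypothetical helper for illustration only: run a command and turn a
// CommandProcessorException back into the integer status it carries.
static int runAndReportStatus(HCatDriver driver, String command) {
  try {
    driver.run(command);
    return 0;
  } catch (CommandProcessorException e) {
    return e.getResponseCode();
  }
}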
use of org.apache.hadoop.hive.ql.processors.CommandProcessorException in project hive by apache.
the class HCatCli method processCmd.
private static int processCmd(String cmd) {
  SessionState ss = SessionState.get();
  long start = System.currentTimeMillis();
  cmd = cmd.trim();
  String firstToken = cmd.split("\\s+")[0].trim();
  if (firstToken.equalsIgnoreCase("set")) {
    try {
      new SetProcessor().run(cmd.substring(firstToken.length()).trim());
      return 0;
    } catch (CommandProcessorException e) {
      return e.getResponseCode();
    }
  } else if (firstToken.equalsIgnoreCase("dfs")) {
    try {
      new DfsProcessor(ss.getConf()).run(cmd.substring(firstToken.length()).trim());
      return 0;
    } catch (CommandProcessorException e) {
      return e.getResponseCode();
    }
  }
  HCatDriver driver = new HCatDriver(ss.getConf());
  try {
    driver.run(cmd);
  } catch (CommandProcessorException e) {
    driver.close();
    sysExit(ss, e.getResponseCode());
  }
  int ret = 0;
  ArrayList<String> res = new ArrayList<String>();
  try {
    while (driver.getResults(res)) {
      for (String r : res) {
        ss.out.println(r);
      }
      res.clear();
    }
  } catch (IOException e) {
    ss.err.println("Failed with exception " + e.getClass().getName() + ":" + e.getMessage() + "\n"
        + org.apache.hadoop.util.StringUtils.stringifyException(e));
    ret = 1;
  }
  int cret = driver.close();
  if (ret == 0) {
    ret = cret;
  }
  long end = System.currentTimeMillis();
  if (end > start) {
    double timeTaken = (end - start) / 1000.0;
    ss.err.println("Time taken: " + timeTaken + " seconds");
  }
  return ret;
}
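The set and dfs branches above delegate to the stock command processors. A minimal standalone sketch, with a hypothetical property value and assuming the same imports and SessionState context as processCmd, of what the "set" branch effectively does once the leading keyword is stripped:
// Hypothetical illustration only: run a single key=value "set" command and
// record the status carried by CommandProcessorException on failure.
SessionState ss = SessionState.get();
int status;
try {
  new SetProcessor().run("hive.exec.dynamic.partition=true");
  status = 0;
} catch (CommandProcessorException e) {
  status = e.getResponseCode();
  ss.err.println("set command failed with status " + status);
}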
use of org.apache.hadoop.hive.ql.processors.CommandProcessorException in project hive by apache.
the class TestDDLWithRemoteMetastoreSecondNamenode method executeQuery.
private void executeQuery(String query) throws Exception {
  try {
    CommandProcessorResponse result = driver.run(query);
    assertNotNull("driver.run() was expected to return result for query: " + query, result);
  } catch (CommandProcessorException e) {
    throw new RuntimeException("Execution of (" + query + ") failed with exit status: "
        + e.getResponseCode() + ", " + e.getMessage() + ", query: " + query);
  }
}
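A complementary sketch, a hypothetical helper that is not part of the test above and assumes the usual org.junit.Assert static imports, for the opposite expectation: assert that a query fails and inspect the response code the exception carries instead of rethrowing it.
// Hypothetical negative-path counterpart to executeQuery(): the query is
// expected to fail, and the carried response code is checked.
private void executeFailingQuery(String query) throws Exception {
  try {
    driver.run(query);
    fail("Query was expected to fail: " + query);
  } catch (CommandProcessorException e) {
    assertTrue("Expected a non-zero response code for query: " + query, e.getResponseCode() != 0);
  }
}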
use of org.apache.hadoop.hive.ql.processors.CommandProcessorException in project hive by apache.
the class AbstractHCatStorerTest method pigValueRangeTest.
/**
 * This is used to test how Pig values of various data types which are out of range for the Hive
 * target column are handled. Currently the options are to raise an error or write NULL.
 * 1. create a data file with 1 column, 1 row
 * 2. load into pig
 * 3. use pig to store into Hive table
 * 4. read from Hive table using Pig
 * 5. check that read value is what is expected
 *
 * @param tblName Hive table name to create
 * @param hiveType datatype to use for the single column in table
 * @param pigType corresponding Pig type when loading file into Pig
 * @param goal how out-of-range values from Pig are handled by HCat, may be {@code null}
 * @param inputValue written to file which is read by Pig, thus must be something Pig can read
 *          (e.g. DateTime.toString(), rather than java.sql.Date)
 * @param expectedValue what Pig should see when reading Hive table
 * @param format date format to use for comparison of values since default DateTime.toString()
 *          includes TZ which is meaningless for Hive DATE type
 */
void pigValueRangeTest(String tblName, String hiveType, String pigType,
    HCatBaseStorer.OOR_VALUE_OPT_VALUES goal, String inputValue, String expectedValue,
    String format) throws Exception {
  AbstractHCatLoaderTest.dropTable(tblName, driver);
  final String field = "f1";
  AbstractHCatLoaderTest.createTableDefaultDB(tblName, field + " " + hiveType, null, driver, storageFormat);
  HcatTestUtils.createTestDataFile(INPUT_FILE_NAME, new String[] { inputValue });
  LOG.debug("File=" + INPUT_FILE_NAME);
  dumpFile(INPUT_FILE_NAME);
  PigServer server = createPigServer(true);
  int queryNumber = 1;
  logAndRegister(server, "A = load '" + INPUT_FILE_NAME + "' as (" + field + ":" + pigType + ");", queryNumber++);
  if (goal == null) {
    logAndRegister(server, "store A into '" + tblName + "' using " + HCatStorer.class.getName() + "();", queryNumber++);
  } else {
    FrontendException fe = null;
    try {
      logAndRegister(server, "store A into '" + tblName + "' using " + HCatStorer.class.getName()
          + "('','','-" + HCatStorer.ON_OOR_VALUE_OPT + " " + goal + "');", queryNumber++);
    } catch (FrontendException e) {
      fe = e;
    }
    switch (goal) {
      case Null:
        // do nothing, fall through and verify the data
        break;
      case Throw:
        assertTrue("Expected a FrontendException", fe != null);
        assertEquals("Expected a different FrontendException.", fe.getMessage(), "Unable to store alias A");
        // this test is done
        return;
      default:
        assertFalse("Unexpected goal: " + goal, 1 == 1);
    }
  }
  logAndRegister(server, "B = load '" + tblName + "' using " + HCatLoader.class.getName() + "();", queryNumber);
  try {
    driver.run("select * from " + tblName);
  } catch (CommandProcessorException e) {
    LOG.debug("cpr.respCode=" + e.getResponseCode() + " cpr.errMsg=" + e.getMessage() + " for table " + tblName);
  }
  List l = new ArrayList();
  driver.getResults(l);
  LOG.debug("Dumping rows via SQL from " + tblName);
  for (Object t : l) {
    LOG.debug(t == null ? null : t.toString() + " t.class=" + t.getClass());
  }
  Iterator<Tuple> itr = server.openIterator("B");
  int numRowsRead = 0;
  while (itr.hasNext()) {
    Tuple t = itr.next();
    if ("date".equals(hiveType)) {
      DateTime dateTime = (DateTime) t.get(0);
      assertTrue(format != null);
      assertEquals("Comparing Pig to Raw data for table " + tblName, expectedValue,
          dateTime == null ? null : dateTime.toString(format));
    } else {
      assertEquals("Comparing Pig to Raw data for table " + tblName, expectedValue,
          t.isNull(0) ? null : t.get(0).toString());
    }
    // see comment at "Dumping rows via SQL..." for why this doesn't work
    // assertEquals("Comparing Pig to Hive", t.get(0), l.get(0));
    numRowsRead++;
  }
  assertEquals("Expected " + 1 + " rows; got " + numRowsRead + " file=" + INPUT_FILE_NAME
      + "; table " + tblName, 1, numRowsRead);
  /*
   * Misc notes: Unfortunately Timestamp.toString() adjusts the value for local TZ and 't' is a
   * String thus the timestamp in 't' doesn't match rawData
   */
}
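The select/getResults block above deliberately logs rather than fails when the verification query cannot run. Pulled out on its own, that pattern looks roughly like the following sketch, with a hypothetical helper name and assuming the same driver, LOG, and imports as the test class:
// Hypothetical helper for illustration: run a verification select and drain
// its rows, logging (not failing) if the command processor rejects it.
private List<String> tryFetchRows(String tblName) throws IOException {
  List<String> rows = new ArrayList<String>();
  try {
    driver.run("select * from " + tblName);
    driver.getResults(rows);
  } catch (CommandProcessorException e) {
    LOG.debug("respCode=" + e.getResponseCode() + " errMsg=" + e.getMessage() + " for table " + tblName);
  }
  return rows;
}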
use of org.apache.hadoop.hive.ql.processors.CommandProcessorException in project hive by apache.
the class TestReplicationScenariosAcidTables method testAcidTablesBootstrapWithConcurrentWrites.
@Test
public void testAcidTablesBootstrapWithConcurrentWrites() throws Throwable {
  HiveConf primaryConf = primary.getConf();
  primary.run("use " + primaryDbName)
      .run("create table t1 (id int) clustered by(id) into 3 buckets stored as orc "
          + "tblproperties (\"transactional\"=\"true\")")
      .run("insert into t1 values(1)");
  // Perform concurrent write on the acid table t1 when bootstrap dump in progress. Bootstrap
  // won't see the written data but the subsequent incremental repl should see it.
  BehaviourInjection<CallerArguments, Boolean> callerInjectedBehavior =
      new BehaviourInjection<CallerArguments, Boolean>() {

    @Nullable
    @Override
    public Boolean apply(@Nullable CallerArguments args) {
      if (injectionPathCalled) {
        nonInjectedPathCalled = true;
      } else {
        // Insert another row to t1 from another txn when bootstrap dump in progress.
        injectionPathCalled = true;
        Thread t = new Thread(new Runnable() {
          @Override
          public void run() {
            LOG.info("Entered new thread");
            IDriver driver = DriverFactory.newDriver(primaryConf);
            SessionState.start(new CliSessionState(primaryConf));
            try {
              driver.run("insert into " + primaryDbName + ".t1 values(2)");
            } catch (CommandProcessorException e) {
              throw new RuntimeException(e);
            }
            LOG.info("Exit new thread success");
          }
        });
        t.start();
        LOG.info("Created new thread {}", t.getName());
        try {
          t.join();
        } catch (InterruptedException e) {
          throw new RuntimeException(e);
        }
      }
      return true;
    }
  };
  InjectableBehaviourObjectStore.setCallerVerifier(callerInjectedBehavior);
  WarehouseInstance.Tuple bootstrapDump = null;
  try {
    bootstrapDump = primary.dump(primaryDbName);
    callerInjectedBehavior.assertInjectionsPerformed(true, true);
  } finally {
    // reset the behaviour
    InjectableBehaviourObjectStore.resetCallerVerifier();
  }
  // Bootstrap dump has taken its snapshot before the concurrent thread performed the write, so it won't see data "2".
  replica.load(replicatedDbName, primaryDbName)
      .run("use " + replicatedDbName)
      .run("repl status " + replicatedDbName)
      .verifyResult(bootstrapDump.lastReplicationId)
      .run("select id from t1 order by id")
      .verifyResults(new String[] { "1" });
  // Incremental should include the concurrent write of data "2" from another txn.
  WarehouseInstance.Tuple incrementalDump = primary.dump(primaryDbName);
  replica.load(replicatedDbName, primaryDbName)
      .run("use " + replicatedDbName)
      .run("repl status " + replicatedDbName)
      .verifyResult(incrementalDump.lastReplicationId)
      .run("select id from t1 order by id")
      .verifyResults(new String[] { "1", "2" });
}
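Stripped of the injection plumbing, the concurrent-writer part of the test reduces to the following sketch, a hypothetical method with placeholder conf and database names: the worker thread must start its own SessionState before it can use a freshly created IDriver.
// Hypothetical distilled version of the injected behaviour above: write to an
// ACID table from a separate thread with its own driver and session.
static void writeConcurrently(HiveConf conf, String dbName) throws InterruptedException {
  Thread writer = new Thread(() -> {
    // Each thread needs its own SessionState before running commands.
    IDriver driver = DriverFactory.newDriver(conf);
    SessionState.start(new CliSessionState(conf));
    try {
      driver.run("insert into " + dbName + ".t1 values(2)");
    } catch (CommandProcessorException e) {
      throw new RuntimeException(e);
    }
  });
  writer.start();
  writer.join();
}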