use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.
the class DDLTask method createTable.
/**
 * Create a new table.
 *
 * @param db
 *          The database in question.
 * @param crtTbl
 *          This is the table we're creating.
 * @return Returns 0 when execution succeeds and a value above 0 if it fails.
 * @throws HiveException
 *           Throws this exception if an unexpected error occurs.
 */
private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException {
  // create the table
  Table tbl = crtTbl.toTable(conf);
  List<SQLPrimaryKey> primaryKeys = crtTbl.getPrimaryKeys();
  List<SQLForeignKey> foreignKeys = crtTbl.getForeignKeys();
  LOG.info("creating table " + tbl.getDbName() + "." + tbl.getTableName() + " on " + tbl.getDataLocation());
  if (crtTbl.getReplicationSpec().isInReplicationScope() && (!crtTbl.getReplaceMode())) {
    // If this is a replication spec, then replace-mode semantics might apply.
    // If we're already asking for a table replacement, we can skip this check;
    // otherwise, when in replication scope and not explicitly asked to replace,
    // check whether the object we're looking at exists, and if so, trigger
    // replace-mode semantics.
    Table existingTable = db.getTable(tbl.getDbName(), tbl.getTableName(), false);
    if (existingTable != null) {
      if (!crtTbl.getReplicationSpec().allowEventReplacementInto(existingTable)) {
        // no replacement: the existing table state is newer than our update.
        return 0;
      } else {
        // we replace the existing table.
        crtTbl.setReplaceMode(true);
      }
    }
  }
  // If HIVE_PARQUET_INT96_DEFAULT_UTC_WRITE_ZONE is enabled, default the Parquet
  // INT96 write zone to UTC (only if the table property is not set)
  if (tbl.getSerializationLib().equals(ParquetHiveSerDe.class.getName())) {
    SessionState ss = SessionState.get();
    if (ss.getConf().getBoolVar(ConfVars.HIVE_PARQUET_INT96_DEFAULT_UTC_WRITE_ZONE)) {
      String parquetTimezone = tbl.getProperty(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY);
      if (parquetTimezone == null || parquetTimezone.isEmpty()) {
        tbl.setProperty(ParquetTableUtils.PARQUET_INT96_WRITE_ZONE_PROPERTY, ParquetTableUtils.PARQUET_INT96_NO_ADJUSTMENT_ZONE);
      }
    }
  }
  // create the table
  if (crtTbl.getReplaceMode()) {
    // replace-mode creates are really alters using CreateTableDesc.
    try {
      db.alterTable(tbl.getDbName() + "." + tbl.getTableName(), tbl, null);
    } catch (InvalidOperationException e) {
      throw new HiveException("Unable to alter table. " + e.getMessage(), e);
    }
  } else {
    if ((foreignKeys != null && foreignKeys.size() > 0) || (primaryKeys != null && primaryKeys.size() > 0)) {
      db.createTable(tbl, crtTbl.getIfNotExists(), primaryKeys, foreignKeys);
    } else {
      db.createTable(tbl, crtTbl.getIfNotExists());
    }
    if (crtTbl.isCTAS()) {
      Table createdTable = db.getTable(tbl.getDbName(), tbl.getTableName());
      DataContainer dc = new DataContainer(createdTable.getTTable());
      SessionState.get().getLineageState().setLineage(createdTable.getPath(), dc, createdTable.getCols());
    }
  }
  addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
  return 0;
}
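The Parquet branch above shows a common defaulting pattern: write a property only when the user has not already set it. Below is a minimal, self-contained sketch of the same pattern; a plain Map stands in for Hive's table properties, and the key and default values are illustrative, not Hive's actual constants.

import java.util.HashMap;
import java.util.Map;

class PropertyDefaults {
  // Apply a default only when the key is absent or blank, so an explicit
  // user-supplied value is never overwritten.
  static void defaultIfUnset(Map<String, String> props, String key, String dflt) {
    String current = props.get(key);
    if (current == null || current.isEmpty()) {
      props.put(key, dflt);
    }
  }

  public static void main(String[] args) {
    Map<String, String> tblProps = new HashMap<>();
    defaultIfUnset(tblProps, "int96.write.zone", "UTC");   // illustrative key, applied
    defaultIfUnset(tblProps, "int96.write.zone", "PST");   // no-op: already set
    System.out.println(tblProps);                          // {int96.write.zone=UTC}
  }
}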
use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.
the class Utilities method getSessionSpecifiedClassLoader.
/**
 * Get the session-specified class loader, falling back to the current
 * (thread-based) class loader when no session or session conf is available.
 *
 * @return the session's class loader, or a thread-based fallback
 */
public static ClassLoader getSessionSpecifiedClassLoader() {
  SessionState state = SessionState.get();
  if (state == null || state.getConf() == null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Hive Conf not found or Session not initiated, use thread based class loader instead");
    }
    return JavaUtils.getClassLoader();
  }
  ClassLoader sessionCL = state.getConf().getClassLoader();
  if (sessionCL != null) {
    if (LOG.isTraceEnabled()) {
      // this is the normal case
      LOG.trace("Use session specified class loader");
    }
    return sessionCL;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Session specified class loader not found, use thread based class loader");
  }
  return JavaUtils.getClassLoader();
}
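A hedged usage sketch: resolving a class through whichever loader the method returns. The wrapper method and its parameter are illustrative, not part of Utilities.

static Class<?> resolveUdfClass(String className) throws ClassNotFoundException {
  // Uses the session's loader when one is active; otherwise the method above
  // has already fallen back to the thread-based loader.
  ClassLoader cl = Utilities.getSessionSpecifiedClassLoader();
  return Class.forName(className, true, cl);
}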
use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.
the class Utilities method restoreSessionSpecifiedClassLoader.
public static void restoreSessionSpecifiedClassLoader(ClassLoader prev) {
  SessionState state = SessionState.get();
  if (state != null && state.getConf() != null) {
    ClassLoader current = state.getConf().getClassLoader();
    // Only swap back if the loader actually changed and the chain of loaders
    // stacked above prev could be closed.
    if (current != prev && JavaUtils.closeClassLoadersTo(current, prev)) {
      Thread.currentThread().setContextClassLoader(prev);
      state.getConf().setClassLoader(prev);
    }
  }
}
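Taken together, these two methods support a capture/restore idiom. A sketch of the pattern a caller would follow; the work inside the try block is illustrative:

static void runWithTemporaryClassLoaders() {
  ClassLoader prev = Utilities.getSessionSpecifiedClassLoader();
  try {
    // ... work that may install child class loaders on the session conf,
    // e.g. registering temporary jars (illustrative) ...
  } finally {
    // Close any loaders stacked on top of prev and reinstate prev on both
    // the current thread and the session conf.
    Utilities.restoreSessionSpecifiedClassLoader(prev);
  }
}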
use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.
the class PostExecOrcFileDump method run.
@Override
public void run(HookContext hookContext) throws Exception {
  assert (hookContext.getHookType() == HookContext.HookType.POST_EXEC_HOOK);
  HiveConf conf = hookContext.getConf();
  LOG.info("Executing post execution hook to print orc file dump..");
  QueryPlan plan = hookContext.getQueryPlan();
  if (plan == null) {
    return;
  }
  FetchTask fetchTask = plan.getFetchTask();
  if (fetchTask != null) {
    SessionState ss = SessionState.get();
    SessionState.LogHelper console = ss.getConsole();
    // the file dump should write to the session state console's error stream
    PrintStream old = System.out;
    System.setOut(console.getErrStream());
    FetchWork fetchWork = fetchTask.getWork();
    boolean partitionedTable = fetchWork.isPartitioned();
    List<Path> directories;
    if (partitionedTable) {
      LOG.info("Printing orc file dump for files from partitioned directory..");
      directories = fetchWork.getPartDir();
    } else {
      LOG.info("Printing orc file dump for files from table directory..");
      directories = Lists.newArrayList();
      directories.add(fetchWork.getTblDir());
    }
    for (Path dir : directories) {
      FileSystem fs = dir.getFileSystem(conf);
      List<FileStatus> fileList = HdfsUtils.listLocatedStatus(fs, dir, hiddenFileFilter);
      for (FileStatus fileStatus : fileList) {
        LOG.info("Printing orc file dump for " + fileStatus.getPath());
        if (fileStatus.getLen() > 0) {
          try {
            // just creating the ORC reader runs sanity checks that verify this is a valid ORC file
            OrcFile.createReader(fs, fileStatus.getPath());
            console.printError("-- BEGIN ORC FILE DUMP --");
            FileDump.main(new String[] { fileStatus.getPath().toString(), "--rowindex=*" });
            console.printError("-- END ORC FILE DUMP --");
          } catch (FileFormatException e) {
            LOG.warn("File " + fileStatus.getPath() + " is not ORC. Skip printing orc file dump");
          } catch (IOException e) {
            LOG.warn("Skip printing orc file dump. Exception: " + e.getMessage());
          }
        } else {
          LOG.warn("Zero length file encountered. Skip printing orc file dump.");
        }
      }
    }
    // restore the old out stream
    System.out.flush();
    System.setOut(old);
  }
}
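The hook redirects System.out so FileDump's output lands on the console's error stream, then restores it. A minimal, self-contained sketch of that redirect pattern follows; the buffer target is illustrative, and the try/finally makes the restore exception-safe (the hook above instead restores at the end without a finally):

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

class StdoutRedirect {
  public static void main(String[] args) {
    ByteArrayOutputStream captured = new ByteArrayOutputStream();
    PrintStream old = System.out;
    System.setOut(new PrintStream(captured));   // redirect stdout to the buffer
    try {
      System.out.println("tool output");        // goes to the buffer, not the console
    } finally {
      System.out.flush();
      System.setOut(old);                       // always restore the original stream
    }
    System.err.println("captured: " + captured.toString().trim());
  }
}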
use of org.apache.hadoop.hive.ql.session.SessionState in project hive by apache.
the class PostExecTezSummaryPrinter method run.
@Override
public void run(HookContext hookContext) throws Exception {
  assert (hookContext.getHookType() == HookContext.HookType.POST_EXEC_HOOK);
  HiveConf conf = hookContext.getConf();
  if (!"tez".equals(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE))) {
    return;
  }
  LOG.info("Executing post execution hook to print tez summary..");
  SessionState ss = SessionState.get();
  SessionState.LogHelper console = ss.getConsole();
  QueryPlan plan = hookContext.getQueryPlan();
  if (plan == null) {
    return;
  }
  List<TezTask> rootTasks = Utilities.getTezTasks(plan.getRootTasks());
  for (TezTask tezTask : rootTasks) {
    LOG.info("Printing summary for tez task: " + tezTask.getName());
    TezCounters counters = tezTask.getTezCounters();
    if (counters != null) {
      String hiveCountersGroup = HiveConf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP);
      for (CounterGroup group : counters) {
        if (hiveCountersGroup.equals(group.getDisplayName())) {
          console.printError(tezTask.getId() + " HIVE COUNTERS:");
          for (TezCounter counter : group) {
            console.printError(" " + counter.getDisplayName() + ": " + counter.getValue());
          }
        } else if (group.getName().equals(FileSystemCounter.class.getName())) {
          console.printError(tezTask.getId() + " FILE SYSTEM COUNTERS:");
          for (TezCounter counter : group) {
            // print only the HDFS counters; skip local file system counters
            if (counter.getName().contains("HDFS")) {
              console.printError(" " + counter.getDisplayName() + ": " + counter.getValue());
            }
          }
        } else if (group.getName().equals(LlapIOCounters.class.getName())) {
          console.printError(tezTask.getId() + " LLAP IO COUNTERS:");
          List<String> testSafeCounters = LlapIOCounters.testSafeCounterNames();
          for (TezCounter counter : group) {
            if (testSafeCounters.contains(counter.getDisplayName())) {
              console.printError(" " + counter.getDisplayName() + ": " + counter.getValue());
            }
          }
        }
      }
    }
  }
}
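Both hooks call SessionState.get() without a null check; as the Utilities methods earlier show, the session can be absent. A hedged, defensive variant of the console lookup (the early return is illustrative hardening, not the hook's actual behavior):

SessionState ss = SessionState.get();
if (ss == null) {
  // No active session (e.g. the hook fired outside a CLI/HiveServer2 session): skip printing.
  return;
}
SessionState.LogHelper console = ss.getConsole();
console.printError("summary line");   // LogHelper.printError writes to the console's error stream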