Use of org.apache.hadoop.hive.ql.lockmgr.LockException in project hive by apache.
The class TransactionBatch, method setupHeartBeatThread.
private void setupHeartBeatThread() {
  // start heartbeat thread
  ThreadFactory threadFactory = new ThreadFactoryBuilder()
      .setDaemon(true)
      .setNameFormat("HiveStreamingConnection-Heartbeat-Thread")
      .build();
  this.scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(threadFactory);
  long heartBeatInterval;
  long initialDelay;
  try {
    // if HIVE_TXN_TIMEOUT is defined, heartbeat interval will be HIVE_TXN_TIMEOUT/2
    heartBeatInterval = DbTxnManager.getHeartbeatInterval(conn.getConf());
  } catch (LockException e) {
    heartBeatInterval = DEFAULT_HEARTBEAT_INTERVAL;
  }
  // to introduce some randomness and to avoid hammering the metastore at the same time (same logic as DbTxnManager)
  initialDelay = (long) (heartBeatInterval * 0.75 * Math.random());
  LOG.info("Starting heartbeat thread with interval: {} ms initialDelay: {} ms for agentInfo: {}",
      heartBeatInterval, initialDelay, conn.getAgentInfo());
  Runnable runnable = new HeartbeatRunnable(conn, minTxnId, maxTxnId, transactionLock, isTxnClosed);
  this.scheduledExecutorService.scheduleWithFixedDelay(runnable, initialDelay, heartBeatInterval, TimeUnit.MILLISECONDS);
}
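Note that the snippet falls back to a default interval instead of failing, because the heartbeat must keep running once scheduled: a task submitted with scheduleWithFixedDelay is cancelled if an exception ever escapes its run() method. The following is a minimal, hypothetical sketch (not the HeartbeatRunnable used above; class and method names are illustrative) showing why a periodic heartbeat catches LockException inside the task body, assuming HiveTxnManager.heartbeat() as the call that can fail.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical guard around a periodic heartbeat (names are illustrative only).
class SafeHeartbeat implements Runnable {

  private static final Logger LOG = LoggerFactory.getLogger(SafeHeartbeat.class);

  private final HiveTxnManager txnMgr;

  SafeHeartbeat(HiveTxnManager txnMgr) {
    this.txnMgr = txnMgr;
  }

  @Override
  public void run() {
    try {
      // Renew the locks/transactions owned by this transaction manager.
      txnMgr.heartbeat();
    } catch (LockException e) {
      // Swallow and log: an exception escaping run() would cancel the scheduled
      // task, so no further heartbeats would ever be sent.
      LOG.warn("Heartbeat failed, will retry on the next tick", e);
    }
  }

  static ScheduledExecutorService schedule(HiveTxnManager txnMgr, long intervalMs) {
    ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    executor.scheduleWithFixedDelay(new SafeHeartbeat(txnMgr), intervalMs, intervalMs, TimeUnit.MILLISECONDS);
    return executor;
  }
}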
Use of org.apache.hadoop.hive.ql.lockmgr.LockException in project hive by apache.
The class DDLSemanticAnalyzer, method analyzeShowLocks.
/**
 * Add the task according to the parsed command tree. This is used for the CLI
 * command "SHOW LOCKS;".
 *
 * @param ast The parsed command tree.
 * @throws SemanticException Parsing failed
 */
private void analyzeShowLocks(ASTNode ast) throws SemanticException {
  String tableName = null;
  HashMap<String, String> partSpec = null;
  boolean isExtended = false;
  if (ast.getChildCount() >= 1) {
    // table for which show locks is being executed
    for (int i = 0; i < ast.getChildCount(); i++) {
      ASTNode child = (ASTNode) ast.getChild(i);
      if (child.getType() == HiveParser.TOK_TABTYPE) {
        ASTNode tableTypeExpr = child;
        tableName = QualifiedNameUtil.getFullyQualifiedName((ASTNode) tableTypeExpr.getChild(0));
        // get partition metadata if partition specified
        if (tableTypeExpr.getChildCount() == 2) {
          ASTNode partSpecNode = (ASTNode) tableTypeExpr.getChild(1);
          partSpec = getValidatedPartSpec(getTable(tableName), partSpecNode, conf, false);
        }
      } else if (child.getType() == HiveParser.KW_EXTENDED) {
        isExtended = true;
      }
    }
  }
  HiveTxnManager txnManager = null;
  try {
    txnManager = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
  } catch (LockException e) {
    throw new SemanticException(e.getMessage());
  }
  ShowLocksDesc showLocksDesc = new ShowLocksDesc(ctx.getResFile(), tableName, partSpec, isExtended,
      txnManager.useNewShowLocksFormat());
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showLocksDesc)));
  setFetchTask(createFetchTask(showLocksDesc.getSchema()));
  // Need to initialize the lock manager
  ctx.setNeedLockMgr(true);
}
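The try/catch around TxnManagerFactory above is the recurring pattern in the analyzers: a LockException raised while obtaining or querying the transaction manager is translated into a SemanticException so that it surfaces as an analysis-time error. Below is a minimal sketch of that translation as a standalone helper; the class and method names are hypothetical, and unlike the snippet above it also chains the original exception as the cause.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.apache.hadoop.hive.ql.lockmgr.TxnManagerFactory;
import org.apache.hadoop.hive.ql.parse.SemanticException;

// Hypothetical helper illustrating the LockException-to-SemanticException translation.
final class TxnManagerLookup {

  private TxnManagerLookup() {
  }

  static HiveTxnManager txnManagerOrFail(HiveConf conf) throws SemanticException {
    try {
      return TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
    } catch (LockException e) {
      // Keep the LockException in the cause chain instead of only copying its message.
      throw new SemanticException(e.getMessage(), e);
    }
  }
}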
Use of org.apache.hadoop.hive.ql.lockmgr.LockException in project hive by apache.
The class LoadSemanticAnalyzer, method analyzeInternal.
@Override
public void analyzeInternal(ASTNode ast) throws SemanticException {
  boolean isLocal = false;
  boolean isOverWrite = false;
  Tree fromTree = ast.getChild(0);
  Tree tableTree = ast.getChild(1);
  if (ast.getChildCount() == 4) {
    isLocal = true;
    isOverWrite = true;
  }
  if (ast.getChildCount() == 3) {
    if (ast.getChild(2).getText().toLowerCase().equals("local")) {
      isLocal = true;
    } else {
      isOverWrite = true;
    }
  }
  // initialize load path
  URI fromURI;
  try {
    String fromPath = stripQuotes(fromTree.getText());
    fromURI = initializeFromURI(fromPath, isLocal);
  } catch (IOException e) {
    throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(fromTree, e.getMessage()), e);
  } catch (URISyntaxException e) {
    throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(fromTree, e.getMessage()), e);
  }
  // initialize destination table/partition
  TableSpec ts = new TableSpec(db, conf, (ASTNode) tableTree);
  if (ts.tableHandle.isView() || ts.tableHandle.isMaterializedView()) {
    throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg());
  }
  if (ts.tableHandle.isNonNative()) {
    throw new SemanticException(ErrorMsg.LOAD_INTO_NON_NATIVE.getMsg());
  }
  if (ts.tableHandle.isStoredAsSubDirectories()) {
    throw new SemanticException(ErrorMsg.LOAD_INTO_STORED_AS_DIR.getMsg());
  }
  List<FieldSchema> parts = ts.tableHandle.getPartitionKeys();
  if ((parts != null && parts.size() > 0) && (ts.partSpec == null || ts.partSpec.size() == 0)) {
    throw new SemanticException(ErrorMsg.NEED_PARTITION_ERROR.getMsg());
  }
  List<String> bucketCols = ts.tableHandle.getBucketCols();
  if (bucketCols != null && !bucketCols.isEmpty()) {
    String error = StrictChecks.checkBucketing(conf);
    if (error != null) {
      throw new SemanticException("Please load into an intermediate table"
          + " and use 'insert... select' to allow Hive to enforce bucketing. " + error);
    }
  }
  // make sure the arguments make sense
  List<FileStatus> files = applyConstraintsAndGetFiles(fromURI, fromTree, isLocal, ts.tableHandle);
  // for managed tables, make sure the file formats match
  if (TableType.MANAGED_TABLE.equals(ts.tableHandle.getTableType())
      && conf.getBoolVar(HiveConf.ConfVars.HIVECHECKFILEFORMAT)) {
    ensureFileFormatsMatch(ts, files, fromURI);
  }
  inputs.add(toReadEntity(new Path(fromURI)));
  Task<? extends Serializable> rTask = null;
  // create final load/move work
  boolean preservePartitionSpecs = false;
  Map<String, String> partSpec = ts.getPartSpec();
  if (partSpec == null) {
    partSpec = new LinkedHashMap<String, String>();
    outputs.add(new WriteEntity(ts.tableHandle,
        (isOverWrite ? WriteEntity.WriteType.INSERT_OVERWRITE : WriteEntity.WriteType.INSERT)));
  } else {
    try {
      Partition part = Hive.get().getPartition(ts.tableHandle, partSpec, false);
      if (part != null) {
        if (isOverWrite) {
          outputs.add(new WriteEntity(part, WriteEntity.WriteType.INSERT_OVERWRITE));
        } else {
          outputs.add(new WriteEntity(part, WriteEntity.WriteType.INSERT));
          // If partition already exists and we aren't overwriting it, then respect
          // its current location info rather than picking it from the parent TableDesc
          preservePartitionSpecs = true;
        }
      } else {
        outputs.add(new WriteEntity(ts.tableHandle,
            (isOverWrite ? WriteEntity.WriteType.INSERT_OVERWRITE : WriteEntity.WriteType.INSERT)));
      }
    } catch (HiveException e) {
      throw new SemanticException(e);
    }
  }
  Long writeId = null;
  int stmtId = -1;
  if (AcidUtils.isTransactionalTable(ts.tableHandle)) {
    try {
      writeId = SessionState.get().getTxnMgr().getTableWriteId(ts.tableHandle.getDbName(),
          ts.tableHandle.getTableName());
    } catch (LockException ex) {
      throw new SemanticException("Failed to allocate the write id", ex);
    }
    stmtId = SessionState.get().getTxnMgr().getStmtIdAndIncrement();
  }
  // Note: this sets LoadFileType incorrectly for ACID; is that relevant for load?
  // See setLoadFileType and setIsAcidIow calls elsewhere for an example.
  LoadTableDesc loadTableWork = new LoadTableDesc(new Path(fromURI), Utilities.getTableDesc(ts.tableHandle),
      partSpec, isOverWrite ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING, writeId);
  loadTableWork.setStmtId(stmtId);
  if (preservePartitionSpecs) {
    // Note: preservePartitionSpecs=true implies inheritTableSpecs=false, but
    // preservePartitionSpecs=false (the default) is not sufficient information
    // to set inheritTableSpecs=true
    loadTableWork.setInheritTableSpecs(false);
  }
  Task<? extends Serializable> childTask = TaskFactory.get(
      new MoveWork(getInputs(), getOutputs(), loadTableWork, null, true, isLocal));
  if (rTask != null) {
    rTask.addDependentTask(childTask);
  } else {
    rTask = childTask;
  }
  rootTasks.add(rTask);
  // The user asked for stats to be collected.
  // Some stats like number of rows require a scan of the data
  // However, some other stats, like number of files, do not require a complete scan
  // Update the stats which do not require a complete scan.
  Task<? extends Serializable> statTask = null;
  if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
    BasicStatsWork basicStatsWork = new BasicStatsWork(loadTableWork);
    basicStatsWork.setNoStatsAggregator(true);
    basicStatsWork.setClearAggregatorStats(true);
    StatsWork columnStatsWork = new StatsWork(ts.tableHandle, basicStatsWork, conf);
    statTask = TaskFactory.get(columnStatsWork);
  }
  if (statTask != null) {
    childTask.addDependentTask(statTask);
  }
}
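In the transactional branch above, the analyzer must allocate a table write id before it can build the LoadTableDesc, and that allocation is where LockException can surface; it is rethrown as a SemanticException so the LOAD statement fails at analysis time. A minimal, hypothetical helper isolating that step (txnMgr and table stand in for SessionState.get().getTxnMgr() and ts.tableHandle in the snippet above):

import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.SemanticException;

// Hypothetical helper mirroring the write-id allocation in analyzeInternal above.
final class WriteIdAllocator {

  private WriteIdAllocator() {
  }

  static long allocateWriteId(HiveTxnManager txnMgr, Table table) throws SemanticException {
    try {
      // Allocates (or returns the already allocated) write id for this table within
      // the current transaction; fails with LockException if that is not possible.
      return txnMgr.getTableWriteId(table.getDbName(), table.getTableName());
    } catch (LockException ex) {
      throw new SemanticException("Failed to allocate the write id", ex);
    }
  }
}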
Use of org.apache.hadoop.hive.ql.lockmgr.LockException in project hive by apache.
The class HiveMaterializedViewUtils, method isOutdatedMaterializedView.
/**
 * Utility method that returns whether a materialized view is outdated (true), not outdated
 * (false), or whether that cannot be determined (null). The latter case may happen e.g. when the
 * materialized view definition uses external tables.
 */
public static Boolean isOutdatedMaterializedView(String validTxnsList, HiveTxnManager txnMgr,
    Set<TableName> tablesUsed, Table materializedViewTable) throws LockException {
  List<String> tablesUsedNames = tablesUsed.stream()
      .map(tableName -> TableName.getDbTable(tableName.getDb(), tableName.getTable()))
      .collect(Collectors.toList());
  ValidTxnWriteIdList currentTxnWriteIds = txnMgr.getValidWriteIds(tablesUsedNames, validTxnsList);
  if (currentTxnWriteIds == null) {
    LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName()
        + " ignored for rewriting as we could not obtain current txn ids");
    return null;
  }
  MaterializedViewMetadata mvMetadata = materializedViewTable.getMVMetadata();
  Set<String> storedTablesUsed = materializedViewTable.getMVMetadata().getSourceTableFullNames();
  if (mvMetadata.getValidTxnList() == null || mvMetadata.getValidTxnList().isEmpty()) {
    LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName()
        + " ignored for rewriting as we could not obtain materialization txn ids");
    return null;
  }
  boolean ignore = false;
  ValidTxnWriteIdList mvTxnWriteIds = new ValidTxnWriteIdList(mvMetadata.getValidTxnList());
  for (String fullyQualifiedTableName : tablesUsedNames) {
    // Tables not stored in the materialized view metadata do not need to be checked,
    // e.g., existing tables joined through an append-columns-only join (PK-FK + not null).
    if (!storedTablesUsed.contains(fullyQualifiedTableName)) {
      continue;
    }
    ValidWriteIdList tableCurrentWriteIds = currentTxnWriteIds.getTableValidWriteIdList(fullyQualifiedTableName);
    if (tableCurrentWriteIds == null) {
      // Uses non-transactional table, cannot be considered
      LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName()
          + " ignored for rewriting as it is outdated and cannot be considered for "
          + " rewriting because it uses non-transactional table " + fullyQualifiedTableName);
      ignore = true;
      break;
    }
    ValidWriteIdList tableWriteIds = mvTxnWriteIds.getTableValidWriteIdList(fullyQualifiedTableName);
    if (tableWriteIds == null) {
      // This should not happen, but we ignore for safety
      LOG.warn("Materialized view " + materializedViewTable.getFullyQualifiedName()
          + " ignored for rewriting as details about txn ids for table " + fullyQualifiedTableName
          + " could not be found in " + mvTxnWriteIds);
      ignore = true;
      break;
    }
    if (!TxnIdUtils.checkEquivalentWriteIds(tableCurrentWriteIds, tableWriteIds)) {
      LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() + " contents are outdated");
      return true;
    }
  }
  if (ignore) {
    return null;
  }
  return false;
}
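Unlike the analyzer snippets, isOutdatedMaterializedView does not handle LockException itself: txnMgr.getValidWriteIds can throw it and the method simply declares it, so callers must deal with both the checked exception and the Boolean tri-state result. Below is a minimal sketch of such a caller, under the assumption that both "outdated" and "cannot be determined" should disable the rewrite; the class and method names are illustrative, and the import path of HiveMaterializedViewUtils may differ between Hive versions.

import java.util.Set;

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewUtils; // path assumed; adjust to the Hive version in use
import org.apache.hadoop.hive.ql.metadata.Table;

// Hypothetical caller: only use the materialized view when it is provably up to date.
final class MaterializedViewFreshness {

  private MaterializedViewFreshness() {
  }

  static boolean usableForRewrite(String validTxnsList, HiveTxnManager txnMgr,
      Set<TableName> tablesUsed, Table mv) {
    try {
      Boolean outdated = HiveMaterializedViewUtils.isOutdatedMaterializedView(
          validTxnsList, txnMgr, tablesUsed, mv);
      // null means "cannot be determined" (e.g. external tables), so be conservative.
      return Boolean.FALSE.equals(outdated);
    } catch (LockException e) {
      // Write-id lookup failed; skip this materialized view rather than abort the query.
      return false;
    }
  }
}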
Use of org.apache.hadoop.hive.ql.lockmgr.LockException in project hive by apache.
The class LoadSemanticAnalyzer, method analyzeLoad.
private void analyzeLoad(ASTNode ast) throws SemanticException {
  fromTree = ast.getChild(0);
  tableTree = ast.getChild(1);
  boolean inputInfo = false;
  // Check the last node
  ASTNode child = (ASTNode) ast.getChild(ast.getChildCount() - 1);
  if (child.getToken().getType() == HiveParser.TOK_INPUTFORMAT) {
    if (child.getChildCount() != 2) {
      throw new SemanticException("FileFormat should contain both input format and Serde");
    }
    try {
      inputFormatClassName = stripQuotes(child.getChild(0).getText());
      serDeClassName = stripQuotes(child.getChild(1).getText());
      inputInfo = true;
    } catch (Exception e) {
      throw new SemanticException("FileFormat inputFormatClassName or serDeClassName is incorrect");
    }
  }
  if ((!inputInfo && ast.getChildCount() == 4) || (inputInfo && ast.getChildCount() == 5)) {
    isLocal = true;
    isOverWrite = true;
  }
  if ((!inputInfo && ast.getChildCount() == 3) || (inputInfo && ast.getChildCount() == 4)) {
    if (ast.getChild(2).getText().toLowerCase().equals("local")) {
      isLocal = true;
    } else {
      isOverWrite = true;
    }
  }
  // initialize load path
  URI fromURI;
  try {
    String fromPath = stripQuotes(fromTree.getText());
    fromURI = initializeFromURI(fromPath, isLocal);
  } catch (IOException | URISyntaxException e) {
    throw new SemanticException(ASTErrorUtils.getMsg(ErrorMsg.INVALID_PATH.getMsg(), fromTree, e.getMessage()), e);
  }
  // initialize destination table/partition
  TableSpec ts = new TableSpec(db, conf, (ASTNode) tableTree);
  if (ts.tableHandle.isView() || ts.tableHandle.isMaterializedView()) {
    throw new SemanticException(ErrorMsg.DML_AGAINST_VIEW.getMsg());
  }
  if (ts.tableHandle.isNonNative()) {
    throw new SemanticException(ErrorMsg.LOAD_INTO_NON_NATIVE.getMsg());
  }
  if (ts.tableHandle.isStoredAsSubDirectories()) {
    throw new SemanticException(ErrorMsg.LOAD_INTO_STORED_AS_DIR.getMsg());
  }
  List<FieldSchema> parts = ts.tableHandle.getPartitionKeys();
  if ((parts != null && parts.size() > 0) && (ts.partSpec == null || ts.partSpec.size() == 0)) {
    // launch a tez job
    reparseAndSuperAnalyze(ts.tableHandle, fromURI);
    return;
  }
  List<String> bucketCols = ts.tableHandle.getBucketCols();
  if (bucketCols != null && !bucketCols.isEmpty()) {
    String error = StrictChecks.checkBucketing(conf);
    if (error != null) {
      // launch a tez job
      reparseAndSuperAnalyze(ts.tableHandle, fromURI);
      return;
    }
  }
  // make sure the arguments make sense
  List<FileStatus> files = applyConstraintsAndGetFiles(fromURI, ts.tableHandle);
  if (queryReWritten) {
    return;
  }
  // for managed tables, make sure the file formats match
  if (TableType.MANAGED_TABLE.equals(ts.tableHandle.getTableType())
      && conf.getBoolVar(HiveConf.ConfVars.HIVECHECKFILEFORMAT)) {
    ensureFileFormatsMatch(ts, files, fromURI);
  }
  inputs.add(toReadEntity(new Path(fromURI)));
  // create final load/move work
  boolean preservePartitionSpecs = false;
  Map<String, String> partSpec = ts.getPartSpec();
  if (partSpec == null) {
    partSpec = new LinkedHashMap<String, String>();
    outputs.add(new WriteEntity(ts.tableHandle,
        (isOverWrite ? WriteEntity.WriteType.INSERT_OVERWRITE : WriteEntity.WriteType.INSERT)));
  } else {
    try {
      Partition part = Hive.get().getPartition(ts.tableHandle, partSpec, false);
      if (part != null) {
        if (isOverWrite) {
          outputs.add(new WriteEntity(part, WriteEntity.WriteType.INSERT_OVERWRITE));
        } else {
          outputs.add(new WriteEntity(part, WriteEntity.WriteType.INSERT));
          // If partition already exists and we aren't overwriting it, then respect
          // its current location info rather than picking it from the parent TableDesc
          preservePartitionSpecs = true;
        }
      } else {
        outputs.add(new WriteEntity(ts.tableHandle,
            (isOverWrite ? WriteEntity.WriteType.INSERT_OVERWRITE : WriteEntity.WriteType.INSERT)));
      }
    } catch (HiveException e) {
      throw new SemanticException(e);
    }
  }
  Long writeId = null;
  int stmtId = -1;
  boolean isTxnTable = AcidUtils.isTransactionalTable(ts.tableHandle);
  if (isTxnTable) {
    try {
      writeId = getTxnMgr().getTableWriteId(ts.tableHandle.getDbName(), ts.tableHandle.getTableName());
    } catch (LockException ex) {
      throw new SemanticException("Failed to allocate the write id", ex);
    }
    stmtId = getTxnMgr().getStmtIdAndIncrement();
  }
  // Note: this sets LoadFileType incorrectly for ACID; is that relevant for load?
  // See setLoadFileType and setIsAcidIow calls elsewhere for an example.
  LoadTableDesc loadTableWork = new LoadTableDesc(new Path(fromURI), Utilities.getTableDesc(ts.tableHandle),
      partSpec, isOverWrite ? LoadFileType.REPLACE_ALL : LoadFileType.KEEP_EXISTING, writeId);
  loadTableWork.setStmtId(stmtId);
  loadTableWork.setInsertOverwrite(isOverWrite);
  if (preservePartitionSpecs) {
    // Note: preservePartitionSpecs=true implies inheritTableSpecs=false, but
    // preservePartitionSpecs=false (the default) is not sufficient information
    // to set inheritTableSpecs=true
    loadTableWork.setInheritTableSpecs(false);
  }
  Task<?> childTask = TaskFactory.get(new MoveWork(getInputs(), getOutputs(), loadTableWork, null, true, isLocal));
  rootTasks.add(childTask);
  // The user asked for stats to be collected.
  // Some stats like number of rows require a scan of the data
  // However, some other stats, like number of files, do not require a complete scan
  // Update the stats which do not require a complete scan.
  Task<?> statTask = null;
  if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) {
    BasicStatsWork basicStatsWork = new BasicStatsWork(loadTableWork);
    basicStatsWork.setNoStatsAggregator(true);
    basicStatsWork.setClearAggregatorStats(true);
    StatsWork columnStatsWork = new StatsWork(ts.tableHandle, basicStatsWork, conf);
    statTask = TaskFactory.get(columnStatsWork);
  }
  if (statTask != null) {
    childTask.addDependentTask(statTask);
  }
}
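This newer variant of the LOAD analyzer follows the same shape as analyzeInternal above, but obtains the transaction manager through the analyzer's own getTxnMgr() and records a per-statement id next to the write id before feeding both into the LoadTableDesc. A minimal, hypothetical sketch that bundles the two ids an ACID LOAD needs; the value class and method names are illustrative only.

import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
import org.apache.hadoop.hive.ql.lockmgr.LockException;
import org.apache.hadoop.hive.ql.parse.SemanticException;

// Hypothetical value class pairing the write id and statement id used by an ACID LOAD.
final class AcidLoadIds {

  final long writeId;
  final int stmtId;

  private AcidLoadIds(long writeId, int stmtId) {
    this.writeId = writeId;
    this.stmtId = stmtId;
  }

  // Mirrors the try/catch in analyzeLoad above: LockException from the write-id
  // allocation is rethrown as a SemanticException.
  static AcidLoadIds allocate(HiveTxnManager txnMgr, String dbName, String tableName)
      throws SemanticException {
    long writeId;
    try {
      writeId = txnMgr.getTableWriteId(dbName, tableName);
    } catch (LockException ex) {
      throw new SemanticException("Failed to allocate the write id", ex);
    }
    // The statement id distinguishes multiple statements within the same transaction.
    return new AcidLoadIds(writeId, txnMgr.getStmtIdAndIncrement());
  }
}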