use of org.apache.hadoop.hive.metastore.api.SourceTable in project hive by apache.
the class AlterMaterializedViewRewriteAnalyzer method analyzeInternal.
@Override
public void analyzeInternal(ASTNode root) throws SemanticException {
  TableName tableName = getQualifiedTableName((ASTNode) root.getChild(0));
  // Value for the flag
  boolean rewriteEnable;
  switch (root.getChild(1).getType()) {
    case HiveParser.TOK_REWRITE_ENABLED:
      rewriteEnable = true;
      break;
    case HiveParser.TOK_REWRITE_DISABLED:
      rewriteEnable = false;
      break;
    default:
      throw new SemanticException("Invalid alter materialized view expression");
  }
  // It can be a fully qualified name or use the default database
  Table materializedViewTable = getTable(tableName, true);
  // If rewriting is being enabled, check that the materialized view
  // only uses transactional (MM and ACID) tables
  if (rewriteEnable) {
    for (SourceTable sourceTable : materializedViewTable.getMVMetadata().getSourceTables()) {
      if (!AcidUtils.isTransactionalTable(sourceTable.getTable())) {
        throw new SemanticException("Automatic rewriting for materialized view cannot be enabled if the "
            + "materialized view uses non-transactional tables");
      }
    }
  }
  AlterMaterializedViewRewriteDesc desc =
      new AlterMaterializedViewRewriteDesc(tableName.getNotEmptyDbTable(), rewriteEnable);
  if (AcidUtils.isTransactionalTable(materializedViewTable)) {
    ddlDescWithWriteId = desc;
  }
  inputs.add(new ReadEntity(materializedViewTable));
  outputs.add(new WriteEntity(materializedViewTable, WriteEntity.WriteType.DDL_EXCLUSIVE));
  // Create task for alterMVRewriteDesc
  DDLWork work = new DDLWork(getInputs(), getOutputs(), desc);
  Task<?> targetTask = TaskFactory.get(work);
  // Create task to update the rewrite flag as a dependent of the previous one
  MaterializedViewUpdateDesc materializedViewUpdateDesc =
      new MaterializedViewUpdateDesc(tableName.getNotEmptyDbTable(), rewriteEnable, !rewriteEnable, false);
  DDLWork updateDdlWork = new DDLWork(getInputs(), getOutputs(), materializedViewUpdateDesc);
  targetTask.addDependentTask(TaskFactory.get(updateDdlWork, conf));
  // Add root task
  rootTasks.add(targetTask);
}
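The transactional-source check above can be exercised on its own. A minimal sketch, assuming only the SourceTable and AcidUtils calls visible in the snippet; the class and method names are hypothetical:

import org.apache.hadoop.hive.metastore.api.SourceTable;
import org.apache.hadoop.hive.ql.io.AcidUtils;

import java.util.Collection;

final class MVSourceChecks {
  // Returns the first source table that is not transactional, or null if
  // every source is MM or ACID (the condition rewriting requires above).
  static SourceTable findNonTransactionalSource(Collection<SourceTable> sources) {
    for (SourceTable sourceTable : sources) {
      // Each SourceTable wraps a metastore Table plus I/U/D counters
      if (!AcidUtils.isTransactionalTable(sourceTable.getTable())) {
        return sourceTable;
      }
    }
    return null;
  }
}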
use of org.apache.hadoop.hive.metastore.api.SourceTable in project hive by apache.
the class SemanticAnalyzer method walkASTMarkTABREF.
private void walkASTMarkTABREF(TableMask tableMask, ASTNode ast, Set<String> cteAlias, Context ctx)
    throws SemanticException {
  Queue<Node> queue = new LinkedList<>();
  queue.add(ast);
  Map<HivePrivilegeObject, MaskAndFilterInfo> basicInfos = new LinkedHashMap<>();
  while (!queue.isEmpty()) {
    ASTNode astNode = (ASTNode) queue.poll();
    if (astNode.getToken().getType() == HiveParser.TOK_TABREF) {
      int aliasIndex = 0;
      StringBuilder additionalTabInfo = new StringBuilder();
      for (int index = 1; index < astNode.getChildCount(); index++) {
        ASTNode ct = (ASTNode) astNode.getChild(index);
        if (ct.getToken().getType() == HiveParser.TOK_TABLEBUCKETSAMPLE
            || ct.getToken().getType() == HiveParser.TOK_TABLESPLITSAMPLE
            || ct.getToken().getType() == HiveParser.TOK_TABLEPROPERTIES) {
          additionalTabInfo.append(ctx.getTokenRewriteStream().toString(ct.getTokenStartIndex(), ct.getTokenStopIndex()));
        } else {
          aliasIndex = index;
        }
      }
      ASTNode tableTree = (ASTNode) (astNode.getChild(0));
      String tabIdName = getUnescapedName(tableTree);
      String alias;
      if (aliasIndex != 0) {
        alias = unescapeIdentifier(astNode.getChild(aliasIndex).getText());
      } else {
        alias = getUnescapedUnqualifiedTableName(tableTree);
      }
      // select * from TAB2 [no masking]
      if (cteAlias.contains(tabIdName)) {
        continue;
      }
      Table table = null;
      try {
        table = getTableObjectByName(tabIdName, false);
      } catch (HiveException e) {
        // This should not happen.
        throw new SemanticException("Got exception though getTableObjectByName method should ignore it");
      }
      if (table == null) {
        // Table may not be found when materialization of CTE is on.
        STATIC_LOG.debug("Table " + tabIdName + " is not found in walkASTMarkTABREF.");
        continue;
      }
      if (table.isMaterializedView()) {
        // When the materialized view is queried directly, check that its
        // source tables do not apply any masking/filtering policies.
        for (SourceTable sourceTable : table.getMVMetadata().getSourceTables()) {
          String qualifiedTableName = TableName.getDbTable(
              sourceTable.getTable().getDbName(), sourceTable.getTable().getTableName());
          try {
            table = getTableObjectByName(qualifiedTableName, true);
          } catch (HiveException e) {
            // This should not happen.
            throw new SemanticException("Table " + qualifiedTableName
                + " not found when trying to obtain it to check masking/filtering policies");
          }
          List<String> colNames = new ArrayList<>();
          extractColumnInfos(table, colNames, new ArrayList<>());
          basicInfos.put(new HivePrivilegeObject(table.getDbName(), table.getTableName(), colNames), null);
        }
      } else {
        List<String> colNames;
        List<String> colTypes;
        if (this.ctx.isCboSucceeded() && this.columnAccessInfo != null
            && (colNames = this.columnAccessInfo.getTableToColumnAllAccessMap().get(table.getCompleteName())) != null) {
          Map<String, String> colNameToType = table.getAllCols().stream()
              .collect(Collectors.toMap(FieldSchema::getName, FieldSchema::getType));
          colTypes = colNames.stream().map(colNameToType::get).collect(Collectors.toList());
        } else {
          colNames = new ArrayList<>();
          colTypes = new ArrayList<>();
          extractColumnInfos(table, colNames, colTypes);
        }
        basicInfos.put(new HivePrivilegeObject(table.getDbName(), table.getTableName(), colNames),
            new MaskAndFilterInfo(colTypes, additionalTabInfo.toString(), alias, astNode, table.isView(), table.isNonNative()));
      }
    }
    if (astNode.getChildCount() > 0 && !IGNORED_TOKENS.contains(astNode.getToken().getType())) {
      for (Node child : astNode.getChildren()) {
        queue.offer(child);
      }
    }
  }
  List<HivePrivilegeObject> basicPrivObjs = new ArrayList<>(basicInfos.keySet());
  List<HivePrivilegeObject> needRewritePrivObjs = tableMask.applyRowFilterAndColumnMasking(basicPrivObjs);
  if (needRewritePrivObjs != null && !needRewritePrivObjs.isEmpty()) {
    for (HivePrivilegeObject privObj : needRewritePrivObjs) {
      MaskAndFilterInfo info = basicInfos.get(privObj);
      // First we check whether the entity actually needs masking or filtering
      if (tableMask.needsMaskingOrFiltering(privObj)) {
        if (info == null) {
          // Querying a materialized view directly is not supported when a
          // mask/filter should be applied on its source tables
          throw new SemanticException(ErrorMsg.MASKING_FILTERING_ON_MATERIALIZED_VIEWS_SOURCES,
              privObj.getDbname(), privObj.getObjectName());
        } else {
          String replacementText = tableMask.create(privObj, info);
          // We don't support masking/filtering against ACID queries at the moment
          if (ctx.getIsUpdateDeleteMerge()) {
            throw new SemanticException(ErrorMsg.MASKING_FILTERING_ON_ACID_NOT_SUPPORTED,
                privObj.getDbname(), privObj.getObjectName());
          }
          tableMask.setNeedsRewrite(true);
          tableMask.addTranslation(info.astNode, replacementText);
        }
      }
    }
  }
}
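The qualified-name construction used when re-resolving each source table above can be isolated. A minimal sketch, assuming only TableName.getDbTable and the SourceTable getters shown in the snippet; the helper name qualifiedNameOf is hypothetical:

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.metastore.api.SourceTable;

final class SourceTableNames {
  // Builds the "db.table" string used as the lookup key when fetching each
  // materialized view source table to check masking/filtering policies.
  static String qualifiedNameOf(SourceTable sourceTable) {
    return TableName.getDbTable(
        sourceTable.getTable().getDbName(),
        sourceTable.getTable().getTableName());
  }
}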
use of org.apache.hadoop.hive.metastore.api.SourceTable in project hive by apache.
the class TextDescTableFormatter method getViewInfo.
private void getViewInfo(StringBuilder tableInfo, Table table, boolean isOutputPadded) {
  formatOutput("Original Query:", table.getViewOriginalText(), tableInfo);
  formatOutput("Expanded Query:", table.getViewExpandedText(), tableInfo);
  if (table.isMaterializedView()) {
    formatOutput("Rewrite Enabled:", table.isRewriteEnabled() ? "Yes" : "No", tableInfo);
    formatOutput("Outdated for Rewriting:", table.isOutdatedForRewriting() == null ? "Unknown"
        : table.isOutdatedForRewriting() ? "Yes" : "No", tableInfo);
    tableInfo.append(LINE_DELIM).append("# Materialized View Source table information").append(LINE_DELIM);
    TextMetaDataTable metaDataTable = new TextMetaDataTable();
    metaDataTable.addRow("Table name", "I/U/D since last rebuild");
    List<SourceTable> sourceTableList = new ArrayList<>(table.getMVMetadata().getSourceTables());
    sourceTableList.sort(Comparator.<SourceTable, String>comparing(sourceTable -> sourceTable.getTable().getDbName())
        .thenComparing(sourceTable -> sourceTable.getTable().getTableName()));
    for (SourceTable sourceTable : sourceTableList) {
      String qualifiedTableName = TableName.getQualified(sourceTable.getTable().getCatName(),
          sourceTable.getTable().getDbName(), sourceTable.getTable().getTableName());
      metaDataTable.addRow(qualifiedTableName, String.format("%d/%d/%d",
          sourceTable.getInsertedCount(), sourceTable.getUpdatedCount(), sourceTable.getDeletedCount()));
    }
    tableInfo.append(metaDataTable.renderTable(isOutputPadded));
  }
}
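The deterministic ordering of the source-table listing can be reproduced standalone. A minimal sketch using only the Comparator chain from the snippet; the class and method names are hypothetical:

import org.apache.hadoop.hive.metastore.api.SourceTable;

import java.util.Comparator;
import java.util.List;

final class SourceTableOrdering {
  // Sorts source tables by database name, then table name, matching the
  // order of the "Source table information" section above.
  static void sortByDbAndTable(List<SourceTable> sourceTables) {
    sourceTables.sort(
        Comparator.<SourceTable, String>comparing(st -> st.getTable().getDbName())
            .thenComparing(st -> st.getTable().getTableName()));
  }
}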
use of org.apache.hadoop.hive.metastore.api.SourceTable in project hive by apache.
the class MaterializedViewMetadata method from.
private SourceTable from(SourceTable sourceTable) {
  SourceTable newSourceTable = new SourceTable();
  newSourceTable.setTable(sourceTable.getTable());
  newSourceTable.setInsertedCount(0L);
  newSourceTable.setUpdatedCount(0L);
  newSourceTable.setDeletedCount(0L);
  return newSourceTable;
}
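SourceTable is a plain Thrift-generated struct, so a reset copy like the one above can be built directly from a metastore Table. A hedged usage sketch of the same pattern; the class and method names are hypothetical:

import org.apache.hadoop.hive.metastore.api.SourceTable;
import org.apache.hadoop.hive.metastore.api.Table;

final class SourceTables {
  // Wraps a metastore Table in a SourceTable whose insert/update/delete
  // counters start at zero, e.g. right after a materialized view rebuild.
  static SourceTable freshSourceTable(Table table) {
    SourceTable sourceTable = new SourceTable();
    sourceTable.setTable(table);
    sourceTable.setInsertedCount(0L);
    sourceTable.setUpdatedCount(0L);
    sourceTable.setDeletedCount(0L);
    return sourceTable;
  }
}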
use of org.apache.hadoop.hive.metastore.api.SourceTable in project hive by apache.
the class TxnHandler method wasCompacted.
private Boolean wasCompacted(CreationMetadata creationMetadata) throws MetaException {
  Set<String> insertOnlyTables = new HashSet<>();
  for (SourceTable sourceTable : creationMetadata.getSourceTables()) {
    Table table = sourceTable.getTable();
    String transactionalProp = table.getParameters().get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
    if (!"insert_only".equalsIgnoreCase(transactionalProp)) {
      continue;
    }
    insertOnlyTables.add(TableName.getDbTable(sourceTable.getTable().getDbName(), sourceTable.getTable().getTableName()));
  }
  if (insertOnlyTables.isEmpty()) {
    return false;
  }
  // We are composing a query that returns a single row if a compaction happened after
  // the materialization was created. Otherwise, the query returns 0 rows.
  // Parse validReaderWriteIdList from the creation metadata
  final ValidTxnWriteIdList validReaderWriteIdList = new ValidTxnWriteIdList(creationMetadata.getValidTxnList());
  List<String> params = new ArrayList<>();
  StringBuilder queryCompletedCompactions = new StringBuilder();
  StringBuilder queryCompactionQueue = new StringBuilder();
  // Compose a query that selects transactions containing an update...
  queryCompletedCompactions.append("SELECT 1 FROM \"COMPLETED_COMPACTIONS\" WHERE (");
  queryCompactionQueue.append("SELECT 1 FROM \"COMPACTION_QUEUE\" WHERE (");
  int i = 0;
  for (String fullyQualifiedName : insertOnlyTables) {
    ValidWriteIdList tblValidWriteIdList = validReaderWriteIdList.getTableValidWriteIdList(fullyQualifiedName);
    if (tblValidWriteIdList == null) {
      LOG.warn("ValidWriteIdList for table {} not present in creation metadata, this should not happen", fullyQualifiedName);
      return null;
    }
    // ... where the transaction had to be committed after the materialization was created...
    if (i != 0) {
      queryCompletedCompactions.append("OR");
      queryCompactionQueue.append("OR");
    }
    String[] names = TxnUtils.getDbTableName(fullyQualifiedName);
    assert (names.length == 2);
    queryCompletedCompactions.append(" (\"CC_DATABASE\"=? AND \"CC_TABLE\"=?");
    queryCompactionQueue.append(" (\"CQ_DATABASE\"=? AND \"CQ_TABLE\"=?");
    params.add(names[0]);
    params.add(names[1]);
    queryCompletedCompactions.append(" AND (\"CC_HIGHEST_WRITE_ID\" > ");
    queryCompletedCompactions.append(tblValidWriteIdList.getHighWatermark());
    queryCompletedCompactions.append(tblValidWriteIdList.getInvalidWriteIds().length == 0 ? ") "
        : " OR \"CC_HIGHEST_WRITE_ID\" IN(" + StringUtils.join(",",
            Arrays.asList(ArrayUtils.toObject(tblValidWriteIdList.getInvalidWriteIds()))) + ") ) ");
    queryCompletedCompactions.append(") ");
    queryCompactionQueue.append(") ");
    i++;
  }
  // ... and where the transaction has already been committed as per the snapshot taken
  // when we are running the current query
  queryCompletedCompactions.append(")");
  queryCompactionQueue.append(") ");
  // Execute query
  queryCompletedCompactions.append(" UNION ");
  queryCompletedCompactions.append(queryCompactionQueue);
  List<String> paramsTwice = new ArrayList<>(params);
  paramsTwice.addAll(params);
  return executeBoolean(queryCompletedCompactions.toString(), paramsTwice,
      "Unable to retrieve materialization invalidation information: compactions");
}
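The insert-only filtering step at the top of wasCompacted can be expressed on its own. A minimal sketch, assuming only the metastore constants and getters used in the snippet; the class and method names are hypothetical:

import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.metastore.api.SourceTable;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;

import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

final class InsertOnlySources {
  // Returns the qualified names of source tables whose transactional_properties
  // are "insert_only"; only these feed the compaction-invalidation query above.
  static Set<String> collectInsertOnlyTables(Collection<SourceTable> sources) {
    Set<String> insertOnlyTables = new HashSet<>();
    for (SourceTable sourceTable : sources) {
      Table table = sourceTable.getTable();
      String transactionalProp =
          table.getParameters().get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES);
      if ("insert_only".equalsIgnoreCase(transactionalProp)) {
        insertOnlyTables.add(TableName.getDbTable(table.getDbName(), table.getTableName()));
      }
    }
    return insertOnlyTables;
  }
}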