Use of org.apache.hadoop.hive.ql.metadata.MaterializedViewMetadata in project hive by apache.
The class HiveIncrementalRelMdRowCount, method source().
public static RelMetadataProvider source(RelOptMaterialization materialization) {
  // Pull the materialized view metadata from the Hive table backing the materialization.
  MaterializedViewMetadata mvMetadata =
      ((RelOptHiveTable) materialization.tableRel.getTable()).getHiveTableMD().getMVMetadata();
  // Index the MV's source tables by their fully qualified name.
  Map<String, SourceTable> sourceTableMap = new HashMap<>(mvMetadata.getSourceTables().size());
  for (SourceTable sourceTable : mvMetadata.getSourceTables()) {
    Table table = sourceTable.getTable();
    sourceTableMap.put(
        TableName.getQualified(table.getCatName(), table.getDbName(), table.getTableName()),
        sourceTable);
  }
  // Expose this class's row-count handler to the planner via Calcite reflection.
  return ReflectiveRelMetadataProvider.reflectiveSource(
      BuiltInMethod.ROW_COUNT.method, new HiveIncrementalRelMdRowCount(sourceTableMap));
}
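For context, a minimal sketch (not from the Hive source) of how such a reflective provider is typically installed in a Calcite planner. The cluster and materialization variables, and the ChainedRelMetadataProvider wiring, are assumptions about the surrounding setup:

// Sketch only: chain the incremental row-count provider in front of the
// cluster's existing metadata provider. `cluster` (a RelOptCluster) and
// `materialization` are assumed to exist in the calling context.
RelMetadataProvider incremental = HiveIncrementalRelMdRowCount.source(materialization);
cluster.setMetadataProvider(
    ChainedRelMetadataProvider.of(
        ImmutableList.of(incremental, cluster.getMetadataProvider())));
// RelMetadataQuery.instance().getRowCount(rel) now consults the custom
// handler first when estimating row counts for the MV's source tables.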
Use of org.apache.hadoop.hive.ql.metadata.MaterializedViewMetadata in project hive by apache.
The class HiveMaterializedViewUtils, method isOutdatedMaterializedView().
/**
 * Utility method that returns whether a materialized view is outdated (true), not outdated
 * (false), or it cannot be determined (null). The latter case may happen e.g. when the
 * materialized view definition uses external tables.
 */
public static Boolean isOutdatedMaterializedView(String validTxnsList, HiveTxnManager txnMgr,
    Set<TableName> tablesUsed, Table materializedViewTable) throws LockException {
  List<String> tablesUsedNames = tablesUsed.stream()
      .map(tableName -> TableName.getDbTable(tableName.getDb(), tableName.getTable()))
      .collect(Collectors.toList());
  ValidTxnWriteIdList currentTxnWriteIds = txnMgr.getValidWriteIds(tablesUsedNames, validTxnsList);
  if (currentTxnWriteIds == null) {
    LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
        " ignored for rewriting as we could not obtain current txn ids");
    return null;
  }
  MaterializedViewMetadata mvMetadata = materializedViewTable.getMVMetadata();
  Set<String> storedTablesUsed = mvMetadata.getSourceTableFullNames();
  if (mvMetadata.getValidTxnList() == null || mvMetadata.getValidTxnList().isEmpty()) {
    LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
        " ignored for rewriting as we could not obtain materialization txn ids");
    return null;
  }
  boolean ignore = false;
  ValidTxnWriteIdList mvTxnWriteIds = new ValidTxnWriteIdList(mvMetadata.getValidTxnList());
  for (String fullyQualifiedTableName : tablesUsedNames) {
    // If the materialized view does not contain a table that is used by the query, we do not
    // need to check whether that table is outdated: a rewriting can still be valid when the
    // additional table is joined to the existing tables with an append-columns-only join,
    // i.e., PK-FK + not null.
    if (!storedTablesUsed.contains(fullyQualifiedTableName)) {
      continue;
    }
    ValidWriteIdList tableCurrentWriteIds =
        currentTxnWriteIds.getTableValidWriteIdList(fullyQualifiedTableName);
    if (tableCurrentWriteIds == null) {
      // Uses a non-transactional table, hence the view cannot be considered
      LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
          " ignored for rewriting because it uses non-transactional table " +
          fullyQualifiedTableName);
      ignore = true;
      break;
    }
    ValidWriteIdList tableWriteIds = mvTxnWriteIds.getTableValidWriteIdList(fullyQualifiedTableName);
    if (tableWriteIds == null) {
      // This should not happen, but we ignore for safety
      LOG.warn("Materialized view " + materializedViewTable.getFullyQualifiedName() +
          " ignored for rewriting as details about txn ids for table " + fullyQualifiedTableName +
          " could not be found in " + mvTxnWriteIds);
      ignore = true;
      break;
    }
    if (!TxnIdUtils.checkEquivalentWriteIds(tableCurrentWriteIds, tableWriteIds)) {
      LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
          " contents are outdated");
      return true;
    }
  }
  if (ignore) {
    return null;
  }
  return false;
}
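A caller-side sketch (not from the Hive source): because the result is tri-state, both null (undeterminable) and true (outdated) must disqualify the view. The variable names are illustrative:

// Illustrative only; validTxnsList, txnMgr, tablesUsed and mvTable are
// assumed to be available in the calling context.
Boolean outdated = HiveMaterializedViewUtils.isOutdatedMaterializedView(
    validTxnsList, txnMgr, tablesUsed, mvTable);
if (outdated == null || outdated) {
  // Skip this materialized view for query rewriting.
}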
Use of org.apache.hadoop.hive.ql.metadata.MaterializedViewMetadata in project hive by apache.
The class CreateMaterializedViewOperation, method execute().
@Override
public int execute() throws HiveException {
  Table oldview = context.getDb().getTable(desc.getViewName(), false);
  if (oldview != null) {
    if (desc.getIfNotExists()) {
      return 0;
    }
    // Materialized view already exists and IF NOT EXISTS was not specified: fail
    throw new HiveException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(desc.getViewName()));
  } else {
    // We create the new view
    Table tbl = desc.toTable(context.getConf());
    // Set the creation signature (a snapshot of the source tables) on the materialized view
    if (tbl.isMaterializedView()) {
      Set<SourceTable> sourceTables = new HashSet<>(desc.getTablesUsed().size());
      for (TableName tableName : desc.getTablesUsed()) {
        sourceTables.add(context.getDb().getTable(tableName).createSourceTable());
      }
      MaterializedViewMetadata metadata = new MaterializedViewMetadata(
          MetaStoreUtils.getDefaultCatalog(context.getConf()), tbl.getDbName(), tbl.getTableName(),
          sourceTables, context.getConf().get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY));
      tbl.setMaterializedViewMetadata(metadata);
    }
    context.getDb().createTable(tbl, desc.getIfNotExists());
    DDLUtils.addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK),
        context.getWork().getOutputs());

    // Set lineage info
    DataContainer dc = new DataContainer(tbl.getTTable());
    Map<String, String> tblProps = tbl.getTTable().getParameters();
    Path tlocation = null;
    try {
      Warehouse wh = new Warehouse(context.getConf());
      tlocation = wh.getDefaultTablePath(context.getDb().getDatabase(tbl.getDbName()),
          tbl.getTableName(),
          tblProps == null || !AcidUtils.isTablePropertyTransactional(tblProps));
    } catch (MetaException e) {
      throw new HiveException(e);
    }
    context.getQueryState().getLineageState().setLineage(tlocation, dc, tbl.getCols());
  }
  return 0;
}
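The snapshot string stored here under ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY is what isOutdatedMaterializedView later parses back. A small sketch of that round trip, using the same calls shown above (the table name is hypothetical):

// Parse the snapshot captured at creation time and look up one source
// table's write ids, mirroring what isOutdatedMaterializedView does.
// "default.src" is a hypothetical fully qualified table name.
ValidTxnWriteIdList snapshot = new ValidTxnWriteIdList(metadata.getValidTxnList());
ValidWriteIdList srcWriteIds = snapshot.getTableValidWriteIdList("default.src");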
Use of org.apache.hadoop.hive.ql.metadata.MaterializedViewMetadata in project hive by apache.
The class MaterializedViewUpdateOperation, method execute().
@Override
public int execute() throws HiveException {
  if (context.getContext().getExplainAnalyze() == AnalyzeState.RUNNING) {
    return 0;
  }
  try {
    if (desc.isRetrieveAndInclude()) {
      // Load the materialized view into the rewriting registry
      Table mvTable = context.getDb().getTable(desc.getName());
      HiveMaterializedViewsRegistry.get().createMaterializedView(context.getDb().getConf(), mvTable);
    } else if (desc.isDisableRewrite()) {
      // Rewriting disabled: remove the view from the cache
      String[] names = desc.getName().split("\\.");
      HiveMaterializedViewsRegistry.get().dropMaterializedView(names[0], names[1]);
    } else if (desc.isUpdateCreationMetadata()) {
      // Update the creation signature with the current snapshot of the source tables
      Table mvTable = context.getDb().getTable(desc.getName());
      MaterializedViewMetadata newMetadata = mvTable.getMVMetadata()
          .reset(context.getConf().get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY));
      context.getDb().updateCreationMetadata(mvTable.getDbName(), mvTable.getTableName(), newMetadata);
      mvTable.setMaterializedViewMetadata(newMetadata);
      HiveMaterializedViewsRegistry.get().refreshMaterializedView(context.getDb().getConf(), mvTable);
    }
  } catch (HiveException e) {
    LOG.debug("Exception during materialized view cache update", e);
    context.getTask().setException(e);
  }
  return 0;
}
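The three desc flags select mutually exclusive branches. A hypothetical construction of the descriptor for the metadata-update path; the constructor's parameter order is an assumption, as is the view name:

// Assumed parameter order: name, retrieveAndInclude, disableRewrite,
// updateCreationMetadata. "default.mv1" is a hypothetical view name.
MaterializedViewUpdateDesc desc =
    new MaterializedViewUpdateDesc("default.mv1", false, false, true);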