Use of org.apache.hadoop.hive.common.TableName in project hive by apache.
The class TableBuilder, method build.
public Table build(Configuration conf) throws MetaException {
  if (tableName == null) {
    throw new MetaException("You must set the table name");
  }
  // Default the owner type and owner when they were not set explicitly.
  if (ownerType == null) {
    ownerType = PrincipalType.USER;
  }
  if (owner == null) {
    try {
      owner = SecurityUtils.getUser();
    } catch (IOException e) {
      throw MetaStoreUtils.newMetaException(e);
    }
  }
  if (catName == null) {
    catName = MetaStoreUtils.getDefaultCatalog(conf);
  }
  Table t = new Table(tableName, dbName, owner, createTime, lastAccessTime, retention,
      buildSd(), partCols, tableParams, viewOriginalText, viewExpandedText, type);
  if (rewriteEnabled) {
    t.setRewriteEnabled(true);
  }
  if (temporary) {
    t.setTemporary(temporary);
  }
  t.setCatName(catName);
  // For materialized views, record the fully qualified source tables in the
  // creation metadata so staleness can be checked later.
  if (!mvReferencedTables.isEmpty()) {
    Set<String> tablesUsed = mvReferencedTables.stream()
        .map(sourceTable -> TableName.getDbTable(
            sourceTable.getTable().getDbName(), sourceTable.getTable().getTableName()))
        .collect(Collectors.toSet());
    CreationMetadata cm = new CreationMetadata(catName, dbName, tableName, tablesUsed);
    cm.setSourceTables(mvReferencedTables);
    if (mvValidTxnList != null) {
      cm.setValidTxnList(mvValidTxnList);
    }
    t.setCreationMetadata(cm);
  }
  return t;
}
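For context, a minimal usage sketch of the builder above. The fluent setters are assumed from the metastore's TableBuilder API, and the catalog, database, table, column, and owner names are hypothetical:

  // A minimal sketch, assuming TableBuilder's fluent setters; all names
  // below are illustrative, not taken from the snippet above.
  Table table = new TableBuilder()
      .setCatName("hive")
      .setDbName("default")
      .setTableName("web_logs")
      .addCol("ip", "string")
      .addCol("ts", "bigint")
      .setOwner("etl_user")
      .build(conf);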
Use of org.apache.hadoop.hive.common.TableName in project hive by apache.
The class HiveMaterializedViewUtils, method isOutdatedMaterializedView.
/**
 * Utility method that returns whether a materialized view is outdated (true), not outdated
 * (false), or whether that cannot be determined (null). The latter case may happen e.g. when
 * the materialized view definition uses external tables.
 */
public static Boolean isOutdatedMaterializedView(String validTxnsList, HiveTxnManager txnMgr,
    Set<TableName> tablesUsed, Table materializedViewTable) throws LockException {
  List<String> tablesUsedNames = tablesUsed.stream()
      .map(tableName -> TableName.getDbTable(tableName.getDb(), tableName.getTable()))
      .collect(Collectors.toList());
  ValidTxnWriteIdList currentTxnWriteIds = txnMgr.getValidWriteIds(tablesUsedNames, validTxnsList);
  if (currentTxnWriteIds == null) {
    LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
        " ignored for rewriting as we could not obtain current txn ids");
    return null;
  }
  MaterializedViewMetadata mvMetadata = materializedViewTable.getMVMetadata();
  Set<String> storedTablesUsed = mvMetadata.getSourceTableFullNames();
  if (mvMetadata.getValidTxnList() == null || mvMetadata.getValidTxnList().isEmpty()) {
    LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
        " ignored for rewriting as we could not obtain materialization txn ids");
    return null;
  }
  boolean ignore = false;
  ValidTxnWriteIdList mvTxnWriteIds = new ValidTxnWriteIdList(mvMetadata.getValidTxnList());
  for (String fullyQualifiedTableName : tablesUsedNames) {
    // Skip tables that are not recorded as source tables of the materialization.
    if (!storedTablesUsed.contains(fullyQualifiedTableName)) {
      continue;
    }
    ValidWriteIdList tableCurrentWriteIds =
        currentTxnWriteIds.getTableValidWriteIdList(fullyQualifiedTableName);
    if (tableCurrentWriteIds == null) {
      // Uses a non-transactional table, so staleness cannot be determined.
      LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
          " ignored for rewriting because it uses the non-transactional table " +
          fullyQualifiedTableName);
      ignore = true;
      break;
    }
    ValidWriteIdList tableWriteIds = mvTxnWriteIds.getTableValidWriteIdList(fullyQualifiedTableName);
    if (tableWriteIds == null) {
      // This should not happen, but we ignore for safety.
      LOG.warn("Materialized view " + materializedViewTable.getFullyQualifiedName() +
          " ignored for rewriting as details about txn ids for table " + fullyQualifiedTableName +
          " could not be found in " + mvTxnWriteIds);
      ignore = true;
      break;
    }
    if (!TxnIdUtils.checkEquivalentWriteIds(tableCurrentWriteIds, tableWriteIds)) {
      LOG.debug("Materialized view " + materializedViewTable.getFullyQualifiedName() +
          " contents are outdated");
      return true;
    }
  }
  if (ignore) {
    return null;
  }
  return false;
}
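A hedged sketch of how a caller might interpret the three-valued result above; the surrounding variables (validTxnsList, txnMgr, tablesUsed, mvTable) are assumed to be in scope:

  // Illustrative caller-side handling; null means "cannot be determined".
  Boolean outdated = HiveMaterializedViewUtils.isOutdatedMaterializedView(
      validTxnsList, txnMgr, tablesUsed, mvTable);
  if (outdated == null) {
    // Staleness unknown (e.g., non-transactional or external sources):
    // skip this view for query rewriting.
  } else if (outdated) {
    // Contents are stale: skip the view or trigger an incremental rebuild.
  } else {
    // Up to date: the view is a valid candidate for rewriting.
  }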
Use of org.apache.hadoop.hive.common.TableName in project hive by apache.
The class TruncateTableHandler, method handle.
@Override
public List<Task<?>> handle(Context context) throws SemanticException {
  AlterTableMessage msg = deserializer.getAlterTableMessage(context.dmd.getPayload());
  final TableName tName = TableName.fromString(msg.getTable(), null,
      context.isDbNameEmpty() ? msg.getDB() : context.dbName);
  TruncateTableDesc truncateTableDesc =
      new TruncateTableDesc(tName, null, context.eventOnlyReplicationSpec());
  truncateTableDesc.setWriteId(msg.getWriteId());
  Task<DDLWork> truncateTableTask = TaskFactory.get(
      new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc, true,
          context.getDumpDirectory(), context.getMetricCollector()),
      context.hiveConf);
  context.log.debug("Added truncate tbl task : {}:{}:{}", truncateTableTask.getId(),
      truncateTableDesc.getTableName(), truncateTableDesc.getWriteId());
  updatedMetadata.set(context.dmd.getEventTo().toString(), tName.getDb(), tName.getTable(), null);
  try {
    return ReplUtils.addChildTask(truncateTableTask);
  } catch (Exception e) {
    throw new SemanticException(e.getMessage());
  }
}
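A short, hedged illustration of the TableName.fromString resolution used above: the second argument is the default catalog and the third is the fallback database applied when the name is unqualified. The names below are hypothetical:

  // Hypothetical names; fromString(name, defaultCatalog, defaultDatabase)
  // parses a possibly qualified table name, falling back to the defaults.
  TableName qualified = TableName.fromString("sales.orders", null, "default");
  // qualified.getDb() -> "sales", qualified.getTable() -> "orders"
  TableName bare = TableName.fromString("orders", null, "default");
  // bare.getDb() -> "default", bare.getTable() -> "orders"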
Use of org.apache.hadoop.hive.common.TableName in project hive by apache.
The class TruncatePartitionHandler, method handle.
@Override
public List<Task<?>> handle(Context context) throws SemanticException {
  AlterPartitionMessage msg = deserializer.getAlterPartitionMessage(context.dmd.getPayload());
  final TableName tName = TableName.fromString(msg.getTable(), null,
      context.isDbNameEmpty() ? msg.getDB() : context.dbName);
  Map<String, String> partSpec = new LinkedHashMap<>();
  org.apache.hadoop.hive.metastore.api.Table tblObj;
  try {
    tblObj = msg.getTableObj();
    // Pair each partition key with its post-alter value, preserving key order.
    Iterator<String> afterIterator = msg.getPtnObjAfter().getValuesIterator();
    for (FieldSchema fs : tblObj.getPartitionKeys()) {
      partSpec.put(fs.getName(), afterIterator.next());
    }
  } catch (Exception e) {
    if (!(e instanceof SemanticException)) {
      throw new SemanticException("Error reading message members", e);
    } else {
      throw (SemanticException) e;
    }
  }
  TruncateTableDesc truncateTableDesc =
      new TruncateTableDesc(tName, partSpec, context.eventOnlyReplicationSpec());
  truncateTableDesc.setWriteId(msg.getWriteId());
  Task<DDLWork> truncatePtnTask = TaskFactory.get(
      new DDLWork(readEntitySet, writeEntitySet, truncateTableDesc, true,
          context.getDumpDirectory(), context.getMetricCollector()),
      context.hiveConf);
  context.log.debug("Added truncate ptn task : {}:{}:{}", truncatePtnTask.getId(),
      truncateTableDesc.getTableName(), truncateTableDesc.getWriteId());
  updatedMetadata.set(context.dmd.getEventTo().toString(), tName.getDb(), tName.getTable(), partSpec);
  try {
    return ReplUtils.addChildTask(truncatePtnTask);
  } catch (Exception e) {
    throw new SemanticException(e.getMessage());
  }
}
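A hedged illustration of the partition-spec pairing above: the insertion-ordered map aligns the table's partition key order with the post-alter values. The keys and values below are hypothetical:

  // Hypothetical partition keys and values; a LinkedHashMap preserves the
  // insertion order, so the spec matches the table's partition key order.
  Map<String, String> spec = new LinkedHashMap<>();
  spec.put("year", "2023");   // first partition key
  spec.put("month", "07");    // second partition key
  // Corresponds to: TRUNCATE TABLE t PARTITION (year='2023', month='07')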
Use of org.apache.hadoop.hive.common.TableName in project hive by apache.
The class AddPrimaryKeyHandler, method handle.
@Override
public List<Task<?>> handle(Context context) throws SemanticException {
  AddPrimaryKeyMessage msg = deserializer.getAddPrimaryKeyMessage(context.dmd.getPayload());
  List<SQLPrimaryKey> pks;
  try {
    pks = msg.getPrimaryKeys();
  } catch (Exception e) {
    if (!(e instanceof SemanticException)) {
      throw new SemanticException("Error reading message members", e);
    } else {
      throw (SemanticException) e;
    }
  }
  List<Task<?>> tasks = new ArrayList<>();
  if (pks.isEmpty()) {
    return tasks;
  }
  final String actualDbName = context.isDbNameEmpty() ? pks.get(0).getTable_db() : context.dbName;
  final String actualTblName = pks.get(0).getTable_name();
  final TableName tName = TableName.fromString(actualTblName, null, actualDbName);
  // Rewrite the keys to point at the database and table resolved above.
  for (SQLPrimaryKey pk : pks) {
    pk.setTable_db(actualDbName);
    pk.setTable_name(actualTblName);
  }
  Constraints constraints = new Constraints(pks, null, null, null, null, null);
  AlterTableAddConstraintDesc addConstraintsDesc =
      new AlterTableAddConstraintDesc(tName, context.eventOnlyReplicationSpec(), constraints);
  Task<DDLWork> addConstraintsTask = TaskFactory.get(
      new DDLWork(readEntitySet, writeEntitySet, addConstraintsDesc, true,
          context.getDumpDirectory(), context.getMetricCollector()),
      context.hiveConf);
  tasks.add(addConstraintsTask);
  context.log.debug("Added add constraints task : {}:{}", addConstraintsTask.getId(), actualTblName);
  updatedMetadata.set(context.dmd.getEventTo().toString(), actualDbName, actualTblName, null);
  return Collections.singletonList(addConstraintsTask);
}
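A hedged note on the positional Constraints constructor used above. The argument order shown in the comment is an assumption inferred from this call site (only primary keys are supplied here), and the table and constraint names in the replayed DDL are hypothetical:

  // Assumed positional order: (primaryKeys, foreignKeys, uniqueConstraints,
  // notNullConstraints, defaultConstraints, checkConstraints); only the
  // primary keys are populated for this replication event.
  Constraints onlyPrimaryKeys = new Constraints(pks, null, null, null, null, null);
  // On replay, the generated task has the effect of a statement such as:
  //   ALTER TABLE sales.orders ADD CONSTRAINT pk_orders
  //       PRIMARY KEY (order_id) DISABLE NOVALIDATE;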