Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.
From the class MacroSemanticAnalyzer, method addEntities.
private void addEntities() throws SemanticException {
  Database database = getDatabase(Warehouse.DEFAULT_DATABASE_NAME);
  // This restricts macro creation to privileged users.
  outputs.add(new WriteEntity(database, WriteEntity.WriteType.DDL_NO_LOCK));
}
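To see what this entity exposes to execution hooks and authorization checks, here is a minimal, hypothetical demo (the class name and printed values are mine, not Hive's; it assumes a Hive client classpath):

import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;

// Hypothetical demo, not Hive code: build the same entity and inspect
// what pre/post execution hooks would see for it.
public class MacroOutputDemo {
  public static void main(String[] args) {
    Database db = new Database();
    db.setName("default");
    WriteEntity output = new WriteEntity(db, WriteEntity.WriteType.DDL_NO_LOCK);
    System.out.println(output.getType());      // DATABASE
    System.out.println(output.getWriteType()); // DDL_NO_LOCK
    System.out.println(output.getName());      // entity name derived from the database name
  }
}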
Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.
From the class DummyTxnManager, method acquireLocks.
@Override
public void acquireLocks(QueryPlan plan, Context ctx, String username, LockedDriverState lDrvState) throws LockException {
  // Make sure we've built the lock manager
  getLockManager();
  // If the lock manager is still null, we aren't using one; nothing to acquire.
  if (lockMgr == null) {
    return;
  }
  List<HiveLockObj> lockObjects = new ArrayList<HiveLockObj>();
  // If a lock is needed on any partition, a shared lock is acquired on its parents also.
  for (ReadEntity input : plan.getInputs()) {
    if (!input.needsLock()) {
      continue;
    }
    LOG.debug("Adding " + input.getName() + " to list of lock inputs");
    if (input.getType() == ReadEntity.Type.DATABASE) {
      lockObjects.addAll(getLockObjects(plan, input.getDatabase(), null, null, HiveLockMode.SHARED));
    } else if (input.getType() == ReadEntity.Type.TABLE) {
      lockObjects.addAll(getLockObjects(plan, null, input.getTable(), null, HiveLockMode.SHARED));
    } else {
      lockObjects.addAll(getLockObjects(plan, null, null, input.getPartition(), HiveLockMode.SHARED));
    }
  }
  for (WriteEntity output : plan.getOutputs()) {
    HiveLockMode lockMode = getWriteEntityLockMode(output);
    if (lockMode == null) {
      continue;
    }
    LOG.debug("Adding " + output.getName() + " to list of lock outputs");
    List<HiveLockObj> lockObj = null;
    if (output.getType() == WriteEntity.Type.DATABASE) {
      lockObjects.addAll(getLockObjects(plan, output.getDatabase(), null, null, lockMode));
    } else if (output.getType() == WriteEntity.Type.TABLE) {
      lockObj = getLockObjects(plan, null, output.getTable(), null, lockMode);
    } else if (output.getType() == WriteEntity.Type.PARTITION) {
      lockObj = getLockObjects(plan, null, null, output.getPartition(), lockMode);
    } else if (output.getType() == WriteEntity.Type.DUMMYPARTITION) {
      // In case of dynamic queries, it is possible to have incomplete dummy partitions
      lockObj = getLockObjects(plan, null, null, output.getPartition(), HiveLockMode.SHARED);
    }
    if (lockObj != null) {
      lockObjects.addAll(lockObj);
      ctx.getOutputLockObjects().put(output, lockObj);
    }
  }
  if (lockObjects.isEmpty() && !ctx.isNeedLockMgr()) {
    return;
  }
  dedupLockObjects(lockObjects);
  List<HiveLock> hiveLocks = lockMgr.lock(lockObjects, false, lDrvState);
  if (hiveLocks == null) {
    throw new LockException(ErrorMsg.LOCK_CANNOT_BE_ACQUIRED.getMsg());
  } else {
    ctx.setHiveLocks(hiveLocks);
  }
}
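The dedupLockObjects call matters because the same object can be requested more than once (for example, a partition plus locks on its parents). Below is a self-contained, illustrative sketch of the dedup idea using stand-in types, not Hive's HiveLockObj API: one entry per lock path survives, and a natural policy is that the stronger mode wins (Hive's exact policy may differ).

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class DedupSketch {
  enum LockMode { SHARED, EXCLUSIVE }
  record Req(String path, LockMode mode) {}

  public static void main(String[] args) {
    List<Req> requests = List.of(
        new Req("default/t1", LockMode.SHARED),
        new Req("default/t1", LockMode.EXCLUSIVE),  // duplicate path, stronger mode
        new Req("default/t2", LockMode.SHARED));
    // Keep one entry per path; EXCLUSIVE beats SHARED.
    Map<String, LockMode> deduped = new LinkedHashMap<>();
    for (Req r : requests) {
      deduped.merge(r.path(), r.mode(),
          (a, b) -> (a == LockMode.EXCLUSIVE || b == LockMode.EXCLUSIVE)
              ? LockMode.EXCLUSIVE : LockMode.SHARED);
    }
    System.out.println(deduped); // {default/t1=EXCLUSIVE, default/t2=SHARED}
  }
}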
Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.
From the class DDLTask, method createView.
/**
 * Create a new view.
 *
 * @param db
 *          The database in question.
 * @param crtView
 *          This is the view we're creating.
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 *           Throws this exception if an unexpected error occurs.
 */
private int createView(Hive db, CreateViewDesc crtView) throws HiveException {
  Table oldview = db.getTable(crtView.getViewName(), false);
  if (oldview != null) {
    // Check whether we are replicating
    if (crtView.getReplicationSpec().isInReplicationScope()) {
      // if this is a replication spec, then replace-mode semantics might apply.
      if (crtView.getReplicationSpec().allowEventReplacementInto(oldview.getParameters())) {
        // we replace the existing view.
        crtView.setReplace(true);
      } else {
        // no replacement, the existing view state is newer than our update.
        LOG.debug("DDLTask: Create View is skipped as view {} is newer than update",
            crtView.getViewName());
        return 0;
      }
    }
    if (!crtView.isReplace()) {
      // View already exists, thus we should be replacing
      throw new HiveException(ErrorMsg.TABLE_ALREADY_EXISTS.getMsg(crtView.getViewName()));
    }
    // It should not be a materialized view
    assert !crtView.isMaterialized();
    // replace the existing view:
    // remove the existing partition columns from the field schema
    oldview.setViewOriginalText(crtView.getViewOriginalText());
    oldview.setViewExpandedText(crtView.getViewExpandedText());
    oldview.setFields(crtView.getSchema());
    if (crtView.getComment() != null) {
      oldview.setProperty("comment", crtView.getComment());
    }
    if (crtView.getTblProps() != null) {
      oldview.getTTable().getParameters().putAll(crtView.getTblProps());
    }
    oldview.setPartCols(crtView.getPartCols());
    if (crtView.getInputFormat() != null) {
      oldview.setInputFormatClass(crtView.getInputFormat());
    }
    if (crtView.getOutputFormat() != null) {
      oldview.setOutputFormatClass(crtView.getOutputFormat());
    }
    oldview.checkValidity(null);
    db.alterTable(crtView.getViewName(), oldview, null);
    addIfAbsentByName(new WriteEntity(oldview, WriteEntity.WriteType.DDL_NO_LOCK));
  } else {
    // We create a new view
    Table tbl = crtView.toTable(conf);
    // We set the signature for the view if it is a materialized view
    if (tbl.isMaterializedView()) {
      CreationMetadata cm = new CreationMetadata(tbl.getDbName(), tbl.getTableName(), ImmutableSet.copyOf(crtView.getTablesUsed()));
      cm.setValidTxnList(conf.get(ValidTxnList.VALID_TXNS_KEY));
      tbl.getTTable().setCreationMetadata(cm);
    }
    db.createTable(tbl, crtView.getIfNotExists());
    addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
    // set lineage info
    DataContainer dc = new DataContainer(tbl.getTTable());
    queryState.getLineageState().setLineage(new Path(crtView.getViewName()), dc, tbl.getCols());
  }
  return 0;
}
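The branching above can be summarized as a decision function. This is a hypothetical distillation for illustration (names are mine, not Hive's):

// Hypothetical summary of the createView() branch logic, not Hive code.
public class CreateViewDecision {
  enum ViewAction { CREATE, REPLACE, SKIP_OLDER_EVENT, FAIL_ALREADY_EXISTS }

  static ViewAction decide(boolean viewExists, boolean isReplace,
                           boolean inReplicationScope, boolean eventNewerThanView) {
    if (!viewExists) {
      return ViewAction.CREATE;                 // plain CREATE VIEW path
    }
    if (inReplicationScope) {
      // Replication applies replace-mode semantics only when the event is newer.
      return eventNewerThanView ? ViewAction.REPLACE : ViewAction.SKIP_OLDER_EVENT;
    }
    return isReplace ? ViewAction.REPLACE       // CREATE OR REPLACE VIEW
                     : ViewAction.FAIL_ALREADY_EXISTS;
  }

  public static void main(String[] args) {
    System.out.println(decide(true, false, true, false)); // SKIP_OLDER_EVENT
  }
}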
Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.
From the class DDLTask, method dropTable.
private void dropTable(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveException {
  // This is a true DROP TABLE
  if (tbl != null && dropTbl.getValidationRequired()) {
    if (tbl.isView()) {
      if (!dropTbl.getExpectView()) {
        if (dropTbl.getIfExists()) {
          return;
        }
        if (dropTbl.getExpectMaterializedView()) {
          throw new HiveException("Cannot drop a view with DROP MATERIALIZED VIEW");
        } else {
          throw new HiveException("Cannot drop a view with DROP TABLE");
        }
      }
    } else if (tbl.isMaterializedView()) {
      if (!dropTbl.getExpectMaterializedView()) {
        if (dropTbl.getIfExists()) {
          return;
        }
        if (dropTbl.getExpectView()) {
          throw new HiveException("Cannot drop a materialized view with DROP VIEW");
        } else {
          throw new HiveException("Cannot drop a materialized view with DROP TABLE");
        }
      }
    } else {
      if (dropTbl.getExpectView()) {
        if (dropTbl.getIfExists()) {
          return;
        }
        throw new HiveException("Cannot drop a base table with DROP VIEW");
      } else if (dropTbl.getExpectMaterializedView()) {
        if (dropTbl.getIfExists()) {
          return;
        }
        throw new HiveException("Cannot drop a base table with DROP MATERIALIZED VIEW");
      }
    }
  }
  ReplicationSpec replicationSpec = dropTbl.getReplicationSpec();
  if ((tbl != null) && replicationSpec.isInReplicationScope()) {
    /**
     * DROP TABLE FOR REPLICATION behaves differently from DROP TABLE IF EXISTS - it more closely
     * matches a DROP TABLE IF OLDER THAN(x) semantic.
     *
     * Ideally, commands executed under the scope of replication need to be idempotent and resilient
     * to repeats. What can happen, sometimes, is that a drone processing a replication task can
     * have been abandoned for not returning in time, but still execute its task after a while,
     * which should not result in it mucking up data that has been impressed later on. So, for example,
     * if we create partition P1, followed by dropping it, followed by creating it yet again,
     * the replication of that drop should not drop the newer partition if it runs after the destination
     * object is already in the newer state.
     *
     * Thus, we check replicationSpec.allowEventReplacementInto to determine whether or not we can
     * drop the object in question (it returns false if the object is newer than the event, true if not).
     *
     * In addition, since DROP TABLE FOR REPLICATION can result in a table not being dropped, while DROP
     * TABLE will always drop the table and the included partitions, DROP TABLE FOR REPLICATION must
     * do one more thing - if it does not drop the table because the table is in a newer state, it must
     * drop the partitions inside it that are older than this event. To wit, DROP TABLE FOR REPL
     * acts like a recursive DROP TABLE IF OLDER.
     */
    if (!replicationSpec.allowEventReplacementInto(tbl.getParameters())) {
      // The table is newer than this event, so don't drop it, but do drop
      // any partitions inside it that are older.
      if (tbl.isPartitioned()) {
        PartitionIterable partitions = new PartitionIterable(db, tbl, null, conf.getIntVar(HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX));
        for (Partition p : Iterables.filter(partitions, replicationSpec.allowEventReplacementInto())) {
          db.dropPartition(tbl.getDbName(), tbl.getTableName(), p.getValues(), true);
        }
      }
      LOG.debug("DDLTask: Drop Table is skipped as table {} is newer than update", dropTbl.getTableName());
      // table is newer, leave it be.
      return;
    }
  }
  // drop the table
  db.dropTable(dropTbl.getTableName(), dropTbl.getIfPurge());
  if (tbl != null) {
    // Remove from cache if it is a materialized view
    if (tbl.isMaterializedView()) {
      HiveMaterializedViewsRegistry.get().dropMaterializedView(tbl);
    }
    // We have already locked the table in DDLSemanticAnalyzer, don't do it again here
    addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
  }
}
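The "DROP TABLE IF OLDER THAN" behavior described in the block comment reduces to comparing a replication state id stored on the object against the incoming event. Here is a hedged sketch of that comparison; the parameter key and method shape are assumptions for illustration, not ReplicationSpec's actual API:

import java.util.Map;

public class ReplDropSketch {
  // Assumed parameter key: Hive records the last replicated state on the
  // object's parameters (the exact key name here is an assumption).
  static final String LAST_REPL_ID = "repl.last.id";

  // Returns true when the incoming event may replace (here: drop) the
  // object, i.e. the object's recorded state is older than the event.
  static boolean allowEventReplacementInto(Map<String, String> objParams, long eventId) {
    long lastSeen = Long.parseLong(objParams.getOrDefault(LAST_REPL_ID, "0"));
    return lastSeen < eventId;
  }

  public static void main(String[] args) {
    Map<String, String> tableParams = Map.of(LAST_REPL_ID, "120");
    System.out.println(allowEventReplacementInto(tableParams, 100)); // false: table is newer, drop skipped
    System.out.println(allowEventReplacementInto(tableParams, 150)); // true: event is newer, drop proceeds
  }
}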
Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.
From the class DDLTask, method alterTable.
/**
 * Alter a given table.
 *
 * @param db
 *          The database in question.
 * @param alterTbl
 *          This is the table we're altering.
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 *           Throws this exception if an unexpected error occurs.
 */
private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException {
  if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) {
    String[] names = Utilities.getDbTableName(alterTbl.getOldName());
    if (Utils.isBootstrapDumpInProgress(db, names[0])) {
      LOG.error("DDLTask: Rename Table not allowed as bootstrap dump in progress");
      throw new HiveException("Rename Table: Not allowed as bootstrap dump in progress");
    }
  }
  // alter the table
  Table tbl = db.getTable(alterTbl.getOldName());
  List<Partition> allPartitions = null;
  if (alterTbl.getPartSpec() != null) {
    Map<String, String> partSpec = alterTbl.getPartSpec();
    if (DDLSemanticAnalyzer.isFullSpec(tbl, partSpec)) {
      allPartitions = new ArrayList<Partition>();
      Partition part = db.getPartition(tbl, partSpec, false);
      if (part == null) {
        // User provided a fully specified partition spec but it doesn't exist, fail.
        throw new HiveException(ErrorMsg.INVALID_PARTITION, StringUtils.join(alterTbl.getPartSpec().keySet(), ',') + " for table " + alterTbl.getOldName());
      }
      allPartitions.add(part);
    } else {
      // DDLSemanticAnalyzer has already checked if partial partition specs are allowed,
      // thus we should not need to check it here.
      allPartitions = db.getPartitions(tbl, alterTbl.getPartSpec());
    }
  }
  // Don't change the table object returned by the metastore, as we'll mess with its caches.
  Table oldTbl = tbl;
  tbl = oldTbl.copy();
  // Alter either every matching partition or just the table, depending on
  // whether a partition spec was supplied.
  if (allPartitions != null) {
    // Alter all partitions
    for (Partition part : allPartitions) {
      addChildTasks(alterTableOrSinglePartition(alterTbl, tbl, part));
    }
  } else {
    // Just alter the table
    addChildTasks(alterTableOrSinglePartition(alterTbl, tbl, null));
  }
  if (allPartitions == null) {
    updateModifiedParameters(tbl.getTTable().getParameters(), conf);
    tbl.checkValidity(conf);
  } else {
    for (Partition tmpPart : allPartitions) {
      updateModifiedParameters(tmpPart.getParameters(), conf);
    }
  }
  try {
    if (allPartitions == null) {
      db.alterTable(alterTbl.getOldName(), tbl, alterTbl.getIsCascade(), alterTbl.getEnvironmentContext());
    } else {
      db.alterPartitions(Warehouse.getQualifiedName(tbl.getTTable()), allPartitions, alterTbl.getEnvironmentContext());
    }
    // Add constraints if necessary
    addConstraints(db, alterTbl);
  } catch (InvalidOperationException e) {
    LOG.error("alter table: ", e);
    throw new HiveException(e, ErrorMsg.GENERIC_ERROR);
  }
  // Don't acquire locks for any of these, we have already asked for them in DDLSemanticAnalyzer.
  if (allPartitions != null) {
    for (Partition tmpPart : allPartitions) {
      work.getInputs().add(new ReadEntity(tmpPart));
      addIfAbsentByName(new WriteEntity(tmpPart, WriteEntity.WriteType.DDL_NO_LOCK));
    }
  } else {
    work.getInputs().add(new ReadEntity(oldTbl));
    addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
  }
  return 0;
}
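updateModifiedParameters() is not shown in this listing; the sketch below approximates its expected effect of stamping who changed the object and when (the parameter names and the seconds-resolution timestamp are assumptions, not the exact Hive implementation):

import java.util.HashMap;
import java.util.Map;

public class ModifiedParamsSketch {
  // Approximation of updateModifiedParameters(): record the modifying
  // user and a modification timestamp on the object's parameters.
  static void updateModifiedParameters(Map<String, String> params, String user) {
    params.put("last_modified_by", user);
    params.put("last_modified_time", Long.toString(System.currentTimeMillis() / 1000));
  }

  public static void main(String[] args) {
    Map<String, String> params = new HashMap<>();
    updateModifiedParameters(params, "hive");
    System.out.println(params); // e.g. {last_modified_time=..., last_modified_by=hive}
  }
}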