Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache: class DDLTask, method alterTableAlterPart.
/**
 * Alter the type of a partition column in a table.
 *
 * @param db
 *          Database containing the table whose partition column is altered.
 * @param alterPartitionDesc
 *          Describes the partition column type change.
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 */
private int alterTableAlterPart(Hive db, AlterTableAlterPartDesc alterPartitionDesc) throws HiveException {
  Table tbl = db.getTable(alterPartitionDesc.getTableName(), true);
  // This is checked by DDLSemanticAnalyzer
  assert (tbl.isPartitioned());
  List<FieldSchema> newPartitionKeys = new ArrayList<FieldSchema>();
  // Check if the existing partition values can be type cast to the new column type
  // with a non-null value before trying to alter the partition column type.
  try {
    Set<Partition> partitions = db.getAllPartitionsOf(tbl);
    List<FieldSchema> partitionKeys = tbl.getTTable().getPartitionKeys();
    int colIndex = -1;
    for (int i = 0; i < partitionKeys.size(); i++) {
      if (partitionKeys.get(i).getName().equals(alterPartitionDesc.getPartKeySpec().getName())) {
        colIndex = i;
        break;
      }
    }
    if (colIndex == -1) {
      throw new HiveException("Cannot find partition column " + alterPartitionDesc.getPartKeySpec().getName());
    }
    TypeInfo expectedType = TypeInfoUtils.getTypeInfoFromTypeString(alterPartitionDesc.getPartKeySpec().getType());
    ObjectInspector outputOI = TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(expectedType);
    Converter converter = ObjectInspectorConverters.getConverter(
        PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI);
    // For all the existing partitions, check that the value can be type cast to a non-null object
    for (Partition part : partitions) {
      if (part.getName().equals(conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME))) {
        continue;
      }
      try {
        String value = part.getValues().get(colIndex);
        Object convertedValue = converter.convert(value);
        if (convertedValue == null) {
          throw new HiveException("Converting from " + TypeInfoFactory.stringTypeInfo + " to " + expectedType
              + " for value : " + value + " resulted in NULL object");
        }
      } catch (Exception e) {
        throw new HiveException("Exception while converting " + TypeInfoFactory.stringTypeInfo + " to "
            + expectedType + " for value : " + part.getValues().get(colIndex), e);
      }
    }
  } catch (Exception e) {
    throw new HiveException("Exception while checking type conversion of existing partition values to "
        + alterPartitionDesc.getPartKeySpec() + " : " + e.getMessage(), e);
  }
  for (FieldSchema col : tbl.getTTable().getPartitionKeys()) {
    if (col.getName().equals(alterPartitionDesc.getPartKeySpec().getName())) {
      newPartitionKeys.add(alterPartitionDesc.getPartKeySpec());
    } else {
      newPartitionKeys.add(col);
    }
  }
  tbl.getTTable().setPartitionKeys(newPartitionKeys);
  db.alterTable(tbl, null);
  work.getInputs().add(new ReadEntity(tbl));
  // We've already locked the table as the input, don't relock it as the output.
  addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
  return 0;
}
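The heart of this method is the converter-based check: partition values are always stored as strings in the metastore, so the method builds a Converter from the string object inspector to the target type and treats a null conversion result as a failed cast. Below is a minimal standalone sketch of that pattern; the serde classes and calls are the same ones used above, while the class name and sample values are invented for illustration.

import java.util.Arrays;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class PartitionValueCheck {
  public static void main(String[] args) {
    // Target type for the partition column, e.g. changing a string key to int.
    TypeInfo expectedType = TypeInfoUtils.getTypeInfoFromTypeString("int");
    ObjectInspector outputOI =
        TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(expectedType);
    Converter converter = ObjectInspectorConverters.getConverter(
        PrimitiveObjectInspectorFactory.javaStringObjectInspector, outputOI);
    // Partition values arrive as strings; "abc" cannot become an int, so the
    // converter yields null, which the DDL code above reports as an error.
    for (String value : Arrays.asList("1", "42", "abc")) {
      Object converted = converter.convert(value);
      System.out.println(value + " -> " + converted);
    }
  }
}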
Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache: class DDLTask, method touch.
/**
 * Rewrite the partition's metadata and force the pre/post execute hooks to
 * be fired.
 *
 * @param db
 *          Database containing the table (and partition) to touch.
 * @param touchDesc
 *          Describes the table or partition to touch.
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 */
private int touch(Hive db, AlterTableSimpleDesc touchDesc) throws HiveException {
  Table tbl = db.getTable(touchDesc.getTableName());
  EnvironmentContext environmentContext = new EnvironmentContext();
  environmentContext.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
  if (touchDesc.getPartSpec() == null) {
    db.alterTable(tbl, environmentContext);
    work.getInputs().add(new ReadEntity(tbl));
    addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
  } else {
    Partition part = db.getPartition(tbl, touchDesc.getPartSpec(), false);
    if (part == null) {
      throw new HiveException("Specified partition does not exist");
    }
    try {
      db.alterPartition(touchDesc.getTableName(), part, environmentContext);
    } catch (InvalidOperationException e) {
      throw new HiveException(e);
    }
    work.getInputs().add(new ReadEntity(part));
    addIfAbsentByName(new WriteEntity(part, WriteEntity.WriteType.DDL_NO_LOCK));
  }
  return 0;
}
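Both branches funnel their outputs through addIfAbsentByName, which deduplicates WriteEntitys by name so an object already registered (for example, a table locked as an input) is not re-added. The following is a rough paraphrase of what such a helper does, a sketch rather than the exact upstream code:

import java.util.Set;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;

// Sketch of a name-based dedup for task outputs: add the WriteEntity only if
// no entity with the same name is already registered.
private boolean addIfAbsentByName(WriteEntity newWriteEntity, Set<WriteEntity> outputs) {
  for (WriteEntity writeEntity : outputs) {
    if (writeEntity.getName().equalsIgnoreCase(newWriteEntity.getName())) {
      return false;
    }
  }
  return outputs.add(newWriteEntity);
}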
Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache: class DDLTask, method createTable.
/**
 * Create a new table.
 *
 * @param db
 *          The database in question.
 * @param crtTbl
 *          Describes the table to create.
 * @return Returns 0 when execution succeeds and above 0 if it fails.
 * @throws HiveException
 *           Thrown if an unexpected error occurs.
 */
private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException {
  // create the table
  Table tbl = crtTbl.toTable(conf);
  List<SQLPrimaryKey> primaryKeys = crtTbl.getPrimaryKeys();
  List<SQLForeignKey> foreignKeys = crtTbl.getForeignKeys();
  List<SQLUniqueConstraint> uniqueConstraints = crtTbl.getUniqueConstraints();
  List<SQLNotNullConstraint> notNullConstraints = crtTbl.getNotNullConstraints();
  List<SQLDefaultConstraint> defaultConstraints = crtTbl.getDefaultConstraints();
  List<SQLCheckConstraint> checkConstraints = crtTbl.getCheckConstraints();
  LOG.debug("creating table {} on {}", tbl.getFullyQualifiedName(), tbl.getDataLocation());
  if (crtTbl.getReplicationSpec().isInReplicationScope() && (!crtTbl.getReplaceMode())) {
    // If this is a replication spec, then replace-mode semantics might apply.
    // If we're already asking for a table replacement, then we can skip this check.
    // Otherwise, if we are in replication scope and have not been explicitly asked
    // to replace, we should check whether the object we're looking at exists, and
    // if so, trigger replace-mode semantics.
    Table existingTable = db.getTable(tbl.getDbName(), tbl.getTableName(), false);
    if (existingTable != null) {
      if (crtTbl.getReplicationSpec().allowEventReplacementInto(existingTable.getParameters())) {
        // we replace the existing table
        crtTbl.setReplaceMode(true);
      } else {
        LOG.debug("DDLTask: Create Table is skipped as table {} is newer than the update", crtTbl.getTableName());
        // no replacement; the existing table state is newer than our update
        return 0;
      }
    }
  }
  // create the table
  if (crtTbl.getReplaceMode()) {
    // replace-mode creates are really alters using CreateTableDesc
    db.alterTable(tbl, null);
  } else {
    if ((foreignKeys != null && foreignKeys.size() > 0)
        || (primaryKeys != null && primaryKeys.size() > 0)
        || (uniqueConstraints != null && uniqueConstraints.size() > 0)
        || (notNullConstraints != null && notNullConstraints.size() > 0)
        || (checkConstraints != null && checkConstraints.size() > 0)
        || (defaultConstraints != null && defaultConstraints.size() > 0)) {
      db.createTable(tbl, crtTbl.getIfNotExists(), primaryKeys, foreignKeys,
          uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
    } else {
      db.createTable(tbl, crtTbl.getIfNotExists());
    }
    Long mmWriteId = crtTbl.getInitialMmWriteId();
    if (crtTbl.isCTAS() || mmWriteId != null) {
      Table createdTable = db.getTable(tbl.getDbName(), tbl.getTableName());
      if (crtTbl.isCTAS()) {
        DataContainer dc = new DataContainer(createdTable.getTTable());
        queryState.getLineageState().setLineage(createdTable.getPath(), dc, createdTable.getCols());
      }
    }
  }
  addIfAbsentByName(new WriteEntity(tbl, WriteEntity.WriteType.DDL_NO_LOCK));
  return 0;
}
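The long inline condition simply asks whether any of the six constraint lists is non-null and non-empty, choosing the createTable overload that also persists constraints. A hypothetical helper (not part of DDLTask) expressing the same test more compactly:

import java.util.List;

// Hypothetical helper: true when any of the given constraint lists has entries.
@SafeVarargs
static boolean hasAnyConstraints(List<?>... constraintLists) {
  for (List<?> constraints : constraintLists) {
    if (constraints != null && !constraints.isEmpty()) {
      return true;
    }
  }
  return false;
}

With it, the branch above would read: if (hasAnyConstraints(foreignKeys, primaryKeys, uniqueConstraints, notNullConstraints, checkConstraints, defaultConstraints)).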
Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache: class DDLTask, method exchangeTablePartition.
private int exchangeTablePartition(Hive db, AlterTableExchangePartition exchangePartition) throws HiveException {
  Map<String, String> partitionSpecs = exchangePartition.getPartitionSpecs();
  Table destTable = exchangePartition.getDestinationTable();
  Table sourceTable = exchangePartition.getSourceTable();
  List<Partition> partitions = db.exchangeTablePartitions(partitionSpecs, sourceTable.getDbName(),
      sourceTable.getTableName(), destTable.getDbName(), destTable.getTableName());
  for (Partition partition : partitions) {
    // Reuse the partition specs from the dest partition since they should be the same
    work.getInputs().add(new ReadEntity(new Partition(sourceTable, partition.getSpec(), null)));
    addIfAbsentByName(new WriteEntity(new Partition(sourceTable, partition.getSpec(), null),
        WriteEntity.WriteType.DELETE));
    addIfAbsentByName(new WriteEntity(new Partition(destTable, partition.getSpec(), null),
        WriteEntity.WriteType.INSERT));
  }
  return 0;
}
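Note the asymmetric WriteTypes: each exchanged partition is registered as a DELETE on the source table and an INSERT on the destination, which drives lock acquisition and hook firing on both sides. The spec map passed to exchangeTablePartitions maps partition column names to values; a hypothetical spec for a two-level partition (the column names and values here are invented) might look like:

import java.util.LinkedHashMap;
import java.util.Map;

// Hypothetical spec for ALTER TABLE ... EXCHANGE PARTITION (ds='2008-04-08', hr='11') WITH TABLE ...
Map<String, String> partitionSpecs = new LinkedHashMap<>();
partitionSpecs.put("ds", "2008-04-08");
partitionSpecs.put("hr", "11");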
Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache: class MoveTask, method releaseLocks.
// Release all the locks acquired for this object.
// This becomes important for multi-table inserts when one branch may take much
// more time than the others; it is better to release the lock for this
// particular insert early. The other option is to wait for all the branches to
// finish, or to set hive.multi.insert.move.tasks.share.dependencies to true,
// in which case the first multi-insert results become available only once all
// branches of the multi-table insert are done.
private void releaseLocks(LoadTableDesc ltd) throws HiveException {
  // nothing needs to be done
  if (!conf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY)) {
    return;
  }
  Context ctx = driverContext.getCtx();
  if (ctx.getHiveTxnManager().supportsAcid()) {
    // The ACID lock manager doesn't maintain getOutputLockObjects(); this 'if' just makes the logic more explicit
    return;
  }
  HiveLockManager lockMgr = ctx.getHiveTxnManager().getLockManager();
  WriteEntity output = ctx.getLoadTableOutputMap().get(ltd);
  List<HiveLockObj> lockObjects = ctx.getOutputLockObjects().get(output);
  if (lockObjects == null) {
    return;
  }
  for (HiveLockObj lockObj : lockObjects) {
    List<HiveLock> locks = lockMgr.getLocks(lockObj.getObj(), false, true);
    for (HiveLock lock : locks) {
      if (lock.getHiveLockMode() == lockObj.getMode()) {
        if (ctx.getHiveLocks().remove(lock)) {
          LOG.info("about to release lock for output: {} lock: {}", output, lock.getHiveLockObject().getName());
          try {
            lockMgr.unlock(lock);
          } catch (LockException le) {
            // should be OK since the lock is ephemeral and will eventually be deleted
            // when the query finishes and the ZooKeeper session is closed
            LOG.warn("Could not release lock {}", lock.getHiveLockObject().getName());
          }
        }
      }
    }
  }
}
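The early returns hinge on two configuration points: hive.support.concurrency must be enabled for any locks to exist at all, and an ACID transaction manager handles its own lock lifecycle, so there is nothing for this method to release. A sketch of setting the relevant flags programmatically; treat the exact ConfVars enum constants as assumptions about this era of HiveConf:

import org.apache.hadoop.hive.conf.HiveConf;

// Enable the legacy (e.g. ZooKeeper-based) lock manager path so releaseLocks
// has something to release, and optionally make multi-insert move tasks share
// dependencies instead of releasing locks branch by branch.
HiveConf conf = new HiveConf();
conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true);
conf.setBoolVar(HiveConf.ConfVars.MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES, true);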