Use of org.apache.hadoop.hive.ql.metadata.DummyPartition in project hive by apache.
The class DummyTxnManager, method getLockObjects:
private List<HiveLockObj> getLockObjects(QueryPlan plan, Database db, Table t, Partition p,
    HiveLockMode mode) throws LockException {
  List<HiveLockObj> locks = new LinkedList<HiveLockObj>();
  HiveLockObject.HiveLockObjectData lockData =
      new HiveLockObject.HiveLockObjectData(plan.getQueryId(),
          String.valueOf(System.currentTimeMillis()), "IMPLICIT", plan.getQueryStr(), conf);
  if (db != null) {
    locks.add(new HiveLockObj(new HiveLockObject(db.getName(), lockData), mode));
    return locks;
  }
  if (t != null) {
    locks.add(new HiveLockObj(new HiveLockObject(t, lockData), mode));
    mode = HiveLockMode.SHARED;
    locks.add(new HiveLockObj(new HiveLockObject(t.getDbName(), lockData), mode));
    return locks;
  }
  if (p != null) {
    if (!(p instanceof DummyPartition)) {
      locks.add(new HiveLockObj(new HiveLockObject(p, lockData), mode));
    }
    // All the parents are locked in shared mode
    mode = HiveLockMode.SHARED;
    // For dummy partitions, only the partition name is needed
    String name = p.getName();
    if (p instanceof DummyPartition) {
      name = p.getName().split("@")[2];
    }
    String partialName = "";
    String[] partns = name.split("/");
    int len = p instanceof DummyPartition ? partns.length : partns.length - 1;
    Map<String, String> partialSpec = new LinkedHashMap<String, String>();
    for (int idx = 0; idx < len; idx++) {
      String partn = partns[idx];
      partialName += partn;
      String[] nameValue = partn.split("=");
      assert (nameValue.length == 2);
      partialSpec.put(nameValue[0], nameValue[1]);
      try {
        locks.add(new HiveLockObj(new HiveLockObject(
            new DummyPartition(p.getTable(),
                p.getTable().getDbName() + "/"
                    + FileUtils.escapePathName(p.getTable().getTableName()).toLowerCase() + "/"
                    + partialName,
                partialSpec), lockData), mode));
        partialName += "/";
      } catch (HiveException e) {
        throw new LockException(e.getMessage());
      }
    }
    locks.add(new HiveLockObj(new HiveLockObject(p.getTable(), lockData), mode));
    locks.add(new HiveLockObj(new HiveLockObject(p.getTable().getDbName(), lockData), mode));
  }
  return locks;
}
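As a quick illustration of the parent-lock loop above, here is a minimal, self-contained sketch of the prefix decomposition it performs on a dummy-partition name of the form "db@table@spec". It is not Hive code; the database, table, and partition values are hypothetical.

import java.util.ArrayList;
import java.util.List;

public class PartialSpecDemo {
  public static void main(String[] args) {
    // Hypothetical dummy-partition name in the "db@table@spec" layout used above.
    String dummyName = "default@sales@ds=2008-04-08/hr=12";

    // split("@")[2] keeps only the partition spec, as in getLockObjects().
    String spec = dummyName.split("@")[2];

    // Each "key=value" prefix gets its own (shared) lock object in the loop above.
    List<String> lockNames = new ArrayList<>();
    String partialName = "";
    for (String partn : spec.split("/")) {
      partialName += partn;
      lockNames.add(partialName);
      partialName += "/";
    }
    // Prints: [ds=2008-04-08, ds=2008-04-08/hr=12]
    System.out.println(lockNames);
  }
}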
Use of org.apache.hadoop.hive.ql.metadata.DummyPartition in project hive by apache.
The class SemanticAnalyzer, method generateTableWriteEntity:
private WriteEntity generateTableWriteEntity(String dest, Table dest_tab,
    Map<String, String> partSpec, LoadTableDesc ltd, DynamicPartitionCtx dpCtx,
    boolean isNonNativeTable) throws SemanticException {
  WriteEntity output = null;
  // Register the whole table as the output when no dynamic partitions are involved;
  // the list of dynamically created partitions is only known at execution time.
  if ((dpCtx == null || dpCtx.getNumDPCols() == 0)) {
    output = new WriteEntity(dest_tab, determineWriteType(ltd, isNonNativeTable, dest));
    if (!outputs.add(output)) {
      throw new SemanticException(
          ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES.getMsg(dest_tab.getTableName()));
    }
  }
  if ((dpCtx != null) && (dpCtx.getNumDPCols() >= 0)) {
    // No static partition specified
    if (dpCtx.getNumSPCols() == 0) {
      output = new WriteEntity(dest_tab, determineWriteType(ltd, isNonNativeTable, dest), false);
      outputs.add(output);
      output.setDynamicPartitionWrite(true);
    } else {
      // Only part of the partition spec was given statically.
      // Create a DummyPartition in this case; since the metastore does not store partial
      // partitions currently, we need to store dummy partitions.
      try {
        String ppath = dpCtx.getSPPath();
        ppath = ppath.substring(0, ppath.length() - 1);
        DummyPartition p = new DummyPartition(dest_tab,
            dest_tab.getDbName() + "@" + dest_tab.getTableName() + "@" + ppath, partSpec);
        output = new WriteEntity(p, getWriteType(dest), false);
        output.setDynamicPartitionWrite(true);
        outputs.add(output);
      } catch (HiveException e) {
        throw new SemanticException(e.getMessage(), e);
      }
    }
  }
  return output;
}
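The name passed to the DummyPartition constructor above follows a "db@table@partitionSpec" layout, which is what getLockObjects in DummyTxnManager later splits on '@'. Below is a minimal sketch of that assembly, assuming dpCtx.getSPPath() returns a static-partition path with a trailing slash (an assumption implied by the substring call); all values are hypothetical and the snippet is not Hive code.

public class DummyPartitionNameDemo {
  public static void main(String[] args) {
    // Hypothetical values; ppath stands in for dpCtx.getSPPath().
    String dbName = "default";
    String tableName = "sales";
    String ppath = "ds=2008-04-08/";

    // Drop the trailing '/' exactly as the method above does.
    ppath = ppath.substring(0, ppath.length() - 1);
    String dummyName = dbName + "@" + tableName + "@" + ppath;

    // Prints: default@sales@ds=2008-04-08
    System.out.println(dummyName);
  }
}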
Use of org.apache.hadoop.hive.ql.metadata.DummyPartition in project hive by apache.
The class TestDbTxnManager, method addDynamicPartitionedOutput:
private WriteEntity addDynamicPartitionedOutput(Table t, WriteEntity.WriteType writeType)
    throws Exception {
  DummyPartition dp = new DummyPartition(t, "no clue what I should call this");
  WriteEntity we = new WriteEntity(dp, writeType, false);
  writeEntities.add(we);
  return we;
}
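A hedged usage sketch of this helper (not verbatim from TestDbTxnManager; the table variable and write type are assumptions for illustration):

// Hypothetical call inside a test method, given some partitioned test table `t`:
// WriteEntity we = addDynamicPartitionedOutput(t, WriteEntity.WriteType.INSERT);
// writeEntities now contains a WriteEntity backed by a DummyPartition, so the txn
// manager under test sees a dynamic-partition write rather than a concrete partition.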
Use of org.apache.hadoop.hive.ql.metadata.DummyPartition in project hive by apache.
The class SemanticAnalyzer, method generateTableWriteEntity (a different version of the method shown above):
private WriteEntity generateTableWriteEntity(String dest, Table dest_tab,
    Map<String, String> partSpec, LoadTableDesc ltd, DynamicPartitionCtx dpCtx)
    throws SemanticException {
  WriteEntity output = null;
  // Register the whole table as the output when no dynamic partitions are involved;
  // the list of dynamically created partitions is only known at execution time.
  if ((dpCtx == null || dpCtx.getNumDPCols() == 0)) {
    output = new WriteEntity(dest_tab, determineWriteType(ltd, dest));
    if (!outputs.add(output)) {
      if (!((this instanceof MergeSemanticAnalyzer) && conf.getBoolVar(ConfVars.MERGE_SPLIT_UPDATE))) {
        /**
         * A MERGE statement with early split update may create several (2) writes to the same
         * table with the same {@link WriteType}, e.g. when the original MERGE statement has both
         * UPDATE and DELETE clauses and the update is split into insert + delete; in that case
         * it is not an error.
         */
        throw new SemanticException(
            ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES.getMsg(dest_tab.getTableName()));
      }
    }
  }
  if ((dpCtx != null) && (dpCtx.getNumDPCols() >= 0)) {
    // No static partition specified
    if (dpCtx.getNumSPCols() == 0) {
      output = new WriteEntity(dest_tab, determineWriteType(ltd, dest), true);
      outputs.add(output);
      output.setDynamicPartitionWrite(true);
    } else {
      // Only part of the partition spec was given statically.
      // Create a DummyPartition in this case; since the metastore does not store partial
      // partitions currently, we need to store dummy partitions.
      try {
        String ppath = dpCtx.getSPPath();
        ppath = ppath.substring(0, ppath.length() - 1);
        DummyPartition p = new DummyPartition(dest_tab,
            dest_tab.getDbName() + "@" + dest_tab.getTableName() + "@" + ppath, partSpec);
        output = new WriteEntity(p, getWriteType(dest), false);
        output.setDynamicPartitionWrite(true);
        outputs.add(output);
      } catch (HiveException e) {
        throw new SemanticException(e.getMessage(), e);
      }
    }
  }
  return output;
}
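One detail shared by both versions: outputs behaves as a set here, so outputs.add(output) returns false when an equal WriteEntity was already registered, and that return value drives the duplicate-output check. A minimal, self-contained sketch with plain strings standing in for WriteEntity objects (the string values are hypothetical):

import java.util.HashSet;
import java.util.Set;

public class DuplicateOutputDemo {
  public static void main(String[] args) {
    // Stand-in for the outputs set of WriteEntity objects.
    Set<String> outputs = new HashSet<>();
    System.out.println(outputs.add("default@sales"));  // true: first registration
    System.out.println(outputs.add("default@sales"));  // false: duplicate, which is what the
    // `!outputs.add(output)` check above turns into OUTPUT_SPECIFIED_MULTIPLE_TIMES,
    // unless the MERGE split-update exception applies.
  }
}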