Use of org.apache.hadoop.hive.metastore.model.MPartition in project hive by apache.
The class ObjectStore, method listMPartitions.
// TODO:pc implement max
private List<MPartition> listMPartitions(String dbName, String tableName, int max, QueryWrapper queryWrapper) {
  boolean success = false;
  List<MPartition> mparts = null;
  try {
    openTransaction();
    LOG.debug("Executing listMPartitions");
    // Normalize identifiers so database and table lookups are case-insensitive.
    dbName = HiveStringUtils.normalizeIdentifier(dbName);
    tableName = HiveStringUtils.normalizeIdentifier(tableName);
    // The query is stored on the QueryWrapper so the caller can close it once
    // the returned MPartition objects are no longer needed.
    Query query = queryWrapper.query = pm.newQuery(MPartition.class, "table.tableName == t1 && table.database.name == t2");
    query.declareParameters("java.lang.String t1, java.lang.String t2");
    query.setOrdering("partitionName ascending");
    if (max > 0) {
      query.setRange(0, max);
    }
    mparts = (List<MPartition>) query.execute(tableName, dbName);
    LOG.debug("Done executing query for listMPartitions");
    // Force all fields to be loaded while the transaction is still open.
    pm.retrieveAll(mparts);
    success = commitTransaction();
    LOG.debug("Done retrieving all objects for listMPartitions " + mparts);
  } finally {
    if (!success) {
      rollbackTransaction();
    }
  }
  return mparts;
}
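For readers less familiar with the JDO API that listMPartitions relies on, the sketch below shows the same query pattern (declared parameters, ordering, and an optional range) against a plain PersistenceManager. The pm argument, the class name, and the database/table names passed in are illustrative assumptions, not part of the Hive code above.

  import java.util.List;
  import javax.jdo.PersistenceManager;
  import javax.jdo.Query;
  import org.apache.hadoop.hive.metastore.model.MPartition;

  public class ListMPartitionsSketch {
    // Minimal sketch, not the Hive implementation: pm is assumed to be an open
    // PersistenceManager bound to the metastore schema.
    static List<MPartition> listFirstN(PersistenceManager pm, String dbName, String tableName, int max) {
      Query query = pm.newQuery(MPartition.class, "table.tableName == t1 && table.database.name == t2");
      query.declareParameters("java.lang.String t1, java.lang.String t2");
      query.setOrdering("partitionName ascending");
      if (max > 0) {
        query.setRange(0, max);               // limit the result set, as the method above does
      }
      @SuppressWarnings("unchecked")
      List<MPartition> mparts = (List<MPartition>) query.execute(tableName, dbName);
      pm.retrieveAll(mparts);                 // load all fields before the query is closed
      return mparts;
    }
  }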
Use of org.apache.hadoop.hive.metastore.model.MPartition in project hive by apache.
The class ObjectStore, method addPartitions.
@Override
public boolean addPartitions(String dbName, String tblName, List<Partition> parts) throws InvalidObjectException, MetaException {
  boolean success = false;
  openTransaction();
  try {
    List<MTablePrivilege> tabGrants = null;
    List<MTableColumnPrivilege> tabColumnGrants = null;
    MTable table = this.getMTable(dbName, tblName);
    // When partition-level privileges are enabled on the table, fetch the
    // table-level grants so they can be copied onto every new partition.
    if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
      tabGrants = this.listAllTableGrants(dbName, tblName);
      tabColumnGrants = this.listTableAllColumnGrants(dbName, tblName);
    }
    List<Object> toPersist = new ArrayList<Object>();
    for (Partition part : parts) {
      if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) {
        throw new MetaException("Partition does not belong to target table " + dbName + "." + tblName + ": " + part);
      }
      MPartition mpart = convertToMPart(part, true);
      toPersist.add(mpart);
      int now = (int) (System.currentTimeMillis() / 1000);
      if (tabGrants != null) {
        for (MTablePrivilege tab : tabGrants) {
          toPersist.add(new MPartitionPrivilege(tab.getPrincipalName(), tab.getPrincipalType(), mpart, tab.getPrivilege(), now, tab.getGrantor(), tab.getGrantorType(), tab.getGrantOption()));
        }
      }
      if (tabColumnGrants != null) {
        for (MTableColumnPrivilege col : tabColumnGrants) {
          toPersist.add(new MPartitionColumnPrivilege(col.getPrincipalName(), col.getPrincipalType(), mpart, col.getColumnName(), col.getPrivilege(), now, col.getGrantor(), col.getGrantorType(), col.getGrantOption()));
        }
      }
    }
    // Persist the partitions and any copied grants in a single batch.
    if (toPersist.size() > 0) {
      pm.makePersistentAll(toPersist);
    }
    success = commitTransaction();
  } finally {
    if (!success) {
      rollbackTransaction();
    }
  }
  return success;
}
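A hedged caller-side sketch of how addPartitions might be invoked follows. The ObjectStore wiring, the database/table names, and the single partition value are assumptions for illustration; a real partition would normally also carry a StorageDescriptor derived from the table, which is omitted here for brevity.

  import java.util.Arrays;
  import java.util.Collections;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hive.metastore.ObjectStore;
  import org.apache.hadoop.hive.metastore.api.Partition;

  public class AddPartitionsSketch {
    static void addOnePartition(Configuration conf) throws Exception {
      // Assumed setup: conf points at a reachable metastore database.
      ObjectStore store = new ObjectStore();
      store.setConf(conf);

      Partition part = new Partition();
      part.setDbName("default");                                  // must match the target db ...
      part.setTableName("web_logs");                              // ... and table, or addPartitions throws MetaException
      part.setValues(Collections.singletonList("2024-01-01"));    // one value per partition key
      part.setCreateTime((int) (System.currentTimeMillis() / 1000));

      boolean added = store.addPartitions("default", "web_logs", Arrays.asList(part));
      System.out.println("partitions added: " + added);
    }
  }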
Use of org.apache.hadoop.hive.metastore.model.MPartition in project hive by apache.
The class ObjectStore, method convertToParts.
private List<Partition> convertToParts(List<MPartition> src, List<Partition> dest) throws MetaException {
  if (src == null) {
    return dest;
  }
  if (dest == null) {
    dest = new ArrayList<Partition>(src.size());
  }
  for (MPartition mp : src) {
    dest.add(convertToPart(mp));
    // Abort the conversion early if the caller's metastore deadline has expired.
    Deadline.checkTimeout();
  }
  return dest;
}
Use of org.apache.hadoop.hive.metastore.model.MPartition in project hive by apache.
The class ObjectStore, method addPartition.
@Override
public boolean addPartition(Partition part) throws InvalidObjectException, MetaException {
  boolean success = false;
  boolean commited = false;
  try {
    MTable table = this.getMTable(part.getDbName(), part.getTableName());
    List<MTablePrivilege> tabGrants = null;
    List<MTableColumnPrivilege> tabColumnGrants = null;
    // When partition-level privileges are enabled, fetch the table grants so
    // they can be copied onto the new partition.
    if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
      tabGrants = this.listAllTableGrants(part.getDbName(), part.getTableName());
      tabColumnGrants = this.listTableAllColumnGrants(part.getDbName(), part.getTableName());
    }
    openTransaction();
    MPartition mpart = convertToMPart(part, true);
    pm.makePersistent(mpart);
    int now = (int) (System.currentTimeMillis() / 1000);
    List<Object> toPersist = new ArrayList<Object>();
    if (tabGrants != null) {
      for (MTablePrivilege tab : tabGrants) {
        MPartitionPrivilege partGrant = new MPartitionPrivilege(tab.getPrincipalName(), tab.getPrincipalType(), mpart, tab.getPrivilege(), now, tab.getGrantor(), tab.getGrantorType(), tab.getGrantOption());
        toPersist.add(partGrant);
      }
    }
    if (tabColumnGrants != null) {
      for (MTableColumnPrivilege col : tabColumnGrants) {
        MPartitionColumnPrivilege partColumn = new MPartitionColumnPrivilege(col.getPrincipalName(), col.getPrincipalType(), mpart, col.getColumnName(), col.getPrivilege(), now, col.getGrantor(), col.getGrantorType(), col.getGrantOption());
        toPersist.add(partColumn);
      }
      // Note: the batched grants are flushed inside this branch, i.e. only when
      // column grants are present.
      if (toPersist.size() > 0) {
        pm.makePersistentAll(toPersist);
      }
    }
    commited = commitTransaction();
    success = true;
  } finally {
    if (!commited) {
      rollbackTransaction();
    }
  }
  return success;
}
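The grant-copying branch above only runs when the table carries the PARTITION_LEVEL_PRIVILEGE parameter set to TRUE. A minimal, hypothetical sketch of marking a Thrift Table that way before it is stored (the table object, its names, and how the table is then persisted are assumptions, not shown in the snippet above):

  import org.apache.hadoop.hive.metastore.api.Table;

  public class PartitionLevelPrivilegeSketch {
    // Hypothetical: flag a Thrift Table so that table-level grants are copied
    // onto every new partition by addPartition/addPartitions above.
    static Table flagTable(Table tbl) {
      tbl.putToParameters("PARTITION_LEVEL_PRIVILEGE", "TRUE");
      return tbl;
    }
  }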
Use of org.apache.hadoop.hive.metastore.model.MPartition in project hive by apache.
The class ObjectStore, method getPartitionsWithAuth.
@Override
public List<Partition> getPartitionsWithAuth(String dbName, String tblName, short max, String userName, List<String> groupNames) throws MetaException, InvalidObjectException {
  boolean success = false;
  QueryWrapper queryWrapper = new QueryWrapper();
  try {
    openTransaction();
    List<MPartition> mparts = listMPartitions(dbName, tblName, max, queryWrapper);
    List<Partition> parts = new ArrayList<Partition>(mparts.size());
    if (mparts != null && mparts.size() > 0) {
      for (MPartition mpart : mparts) {
        MTable mtbl = mpart.getTable();
        Partition part = convertToPart(mpart);
        parts.add(part);
        // Attach the requesting user's partition-level privileges when the
        // table has PARTITION_LEVEL_PRIVILEGE enabled.
        if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
          String partName = Warehouse.makePartName(this.convertToFieldSchemas(mtbl.getPartitionKeys()), part.getValues());
          PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(dbName, tblName, partName, userName, groupNames);
          part.setPrivileges(partAuth);
        }
      }
    }
    success = commitTransaction();
    return parts;
  } finally {
    if (!success) {
      rollbackTransaction();
    }
    queryWrapper.close();
  }
}
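A sketch of reading the per-partition privileges that getPartitionsWithAuth attaches. The pre-configured store, the table, the user "alice", and the group "analysts" are assumptions used only for illustration.

  import java.util.Arrays;
  import java.util.List;
  import org.apache.hadoop.hive.metastore.ObjectStore;
  import org.apache.hadoop.hive.metastore.api.Partition;

  public class PartitionsWithAuthSketch {
    static void printPrivileges(ObjectStore store) throws Exception {
      // Assumed inputs: the first 100 partitions of default.web_logs, as seen by user "alice".
      List<Partition> parts = store.getPartitionsWithAuth("default", "web_logs", (short) 100, "alice", Arrays.asList("analysts"));
      for (Partition p : parts) {
        // getPrivileges() is only populated when PARTITION_LEVEL_PRIVILEGE is enabled on the table.
        System.out.println(p.getValues() + " -> " + p.getPrivileges());
      }
    }
  }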