Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
The class TestHBaseImport, method parallelOdd.
// Same as the test above, except we create 9 of everything instead of 10. This matters
// because with a batch size of 2 the previous test always produces 10 / 2 = 5 writes on the
// partition queue, each with exactly 2 entries. Here we exercise the case where the last
// entry in the queue carries fewer partitions.
@Test
public void parallelOdd() throws Exception {
  int parallelFactor = 9;
  RawStore rdbms = new ObjectStore();
  rdbms.setConf(conf);
  String[] dbNames = new String[] { "oddparalleldb1" };
  int now = (int) (System.currentTimeMillis() / 1000);
  for (int i = 0; i < dbNames.length; i++) {
    rdbms.createDatabase(
        new Database(dbNames[i], "no description", "file:/tmp", emptyParameters));
    List<FieldSchema> cols = new ArrayList<>();
    cols.add(new FieldSchema("col1", "int", "nocomment"));
    SerDeInfo serde = new SerDeInfo("serde", "seriallib", null);
    StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output",
        false, 0, serde, null, null, emptyParameters);
    List<FieldSchema> partCols = new ArrayList<>();
    partCols.add(new FieldSchema("region", "string", ""));
    for (int j = 0; j < parallelFactor; j++) {
      rdbms.createTable(new Table("t" + j, dbNames[i], "me", now, now, 0, sd, partCols,
          emptyParameters, null, null, null));
      for (int k = 0; k < parallelFactor; k++) {
        StorageDescriptor psd = new StorageDescriptor(sd);
        psd.setLocation("file:/tmp/region=" + k);
        Partition part = new Partition(Arrays.asList("p" + k), dbNames[i], "t" + j,
            now, now, psd, emptyParameters);
        rdbms.addPartition(part);
      }
    }
  }
  HBaseImport importer = new HBaseImport("-p", "2", "-b", "2", "-d", dbNames[0]);
  importer.setConnections(rdbms, store);
  importer.run();
  for (int i = 0; i < dbNames.length; i++) {
    Database db = store.getDatabase(dbNames[i]);
    Assert.assertNotNull(db);
    for (int j = 0; j < parallelFactor; j++) {
      Table table = store.getTable(db.getName(), "t" + j);
      Assert.assertNotNull(table);
      Assert.assertEquals(now, table.getLastAccessTime());
      Assert.assertEquals("input", table.getSd().getInputFormat());
      for (int k = 0; k < parallelFactor; k++) {
        Partition part = store.getPartition(dbNames[i], "t" + j, Arrays.asList("p" + k));
        Assert.assertNotNull(part);
        Assert.assertEquals("file:/tmp/region=" + k, part.getSd().getLocation());
      }
      Assert.assertEquals(parallelFactor, store.getPartitions(dbNames[i], "t" + j, -1).size());
    }
    Assert.assertEquals(parallelFactor, store.getAllTables(dbNames[i]).size());
  }
}
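To make the batching arithmetic in the comment concrete, here is a minimal, self-contained sketch of the split being tested; it is an illustration of the expected behavior, not the importer's actual queueing code. With 9 partition names and a batch size of 2, the split yields four full batches and a final batch holding a single entry.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class BatchSketch {
  // Splits items into consecutive batches of at most batchSize elements.
  static <T> List<List<T>> toBatches(List<T> items, int batchSize) {
    List<List<T>> batches = new ArrayList<>();
    for (int i = 0; i < items.size(); i += batchSize) {
      batches.add(items.subList(i, Math.min(i + batchSize, items.size())));
    }
    return batches;
  }

  public static void main(String[] args) {
    List<String> parts = Arrays.asList("p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7", "p8");
    // Prints the batch sizes: 2 2 2 2 1
    for (List<String> batch : toBatches(parts, 2)) {
      System.out.print(batch.size() + " ");
    }
  }
}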
Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
The class ObjectStore, method getPartitionsByExprInternal.
protected boolean getPartitionsByExprInternal(String dbName, String tblName, final byte[] expr,
    final String defaultPartitionName, final short maxParts, List<Partition> result,
    boolean allowSql, boolean allowJdo) throws TException {
  assert result != null;
  final ExpressionTree exprTree = PartFilterExprUtil.makeExpressionTree(expressionProxy, expr);
  final AtomicBoolean hasUnknownPartitions = new AtomicBoolean(false);
  result.addAll(new GetListHelper<Partition>(dbName, tblName, allowSql, allowJdo) {

    @Override
    protected List<Partition> getSqlResult(GetHelper<List<Partition>> ctx) throws MetaException {
      // If we have some sort of expression tree, try SQL filter pushdown.
      if (exprTree != null) {
        SqlFilterForPushdown filter = new SqlFilterForPushdown();
        if (directSql.generateSqlFilterForPushdown(ctx.getTable(), exprTree, filter)) {
          return directSql.getPartitionsViaSqlFilter(filter, null);
        }
      }
      // We couldn't do SQL filter pushdown. Get names via normal means.
      List<String> partNames = new LinkedList<String>();
      hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn(
          ctx.getTable(), expr, defaultPartitionName, maxParts, partNames));
      return directSql.getPartitionsViaSqlFilter(dbName, tblName, partNames);
    }

    @Override
    protected List<Partition> getJdoResult(GetHelper<List<Partition>> ctx)
        throws MetaException, NoSuchObjectException {
      // If we have some sort of expression tree, try JDOQL filter pushdown.
      List<Partition> result = null;
      if (exprTree != null) {
        result = getPartitionsViaOrmFilter(ctx.getTable(), exprTree, maxParts, false);
      }
      if (result == null) {
        // We couldn't do JDOQL filter pushdown. Get names via normal means.
        List<String> partNames = new ArrayList<String>();
        hasUnknownPartitions.set(getPartitionNamesPrunedByExprNoTxn(
            ctx.getTable(), expr, defaultPartitionName, maxParts, partNames));
        result = getPartitionsViaOrmFilter(dbName, tblName, partNames);
      }
      return result;
    }
  }.run(true));
  return hasUnknownPartitions.get();
}
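Both helpers above share one shape: try an optimized filter pushdown first, and fall back to pruning partition names by expression when pushdown is not possible. A minimal generic sketch of that shape, using a hypothetical Supplier-based stand-in rather than Hive's GetHelper machinery:

import java.util.List;
import java.util.function.Supplier;

class PushdownWithFallback {
  // Tries the optimized path first; a null result signals "pushdown not
  // possible" and routes the call to the generic fallback path.
  static <T> List<T> fetch(Supplier<List<T>> pushdown, Supplier<List<T>> fallback) {
    List<T> result = pushdown.get();
    return (result != null) ? result : fallback.get();
  }
}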
Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
The class ObjectStore, method getPartitionWithAuth.
@Override
public Partition getPartitionWithAuth(String dbName, String tblName, List<String> partVals,
    String user_name, List<String> group_names)
    throws NoSuchObjectException, MetaException, InvalidObjectException {
  boolean success = false;
  try {
    openTransaction();
    MPartition mpart = getMPartition(dbName, tblName, partVals);
    if (mpart == null) {
      commitTransaction();
      throw new NoSuchObjectException("partition values=" + partVals.toString());
    }
    MTable mtbl = mpart.getTable();
    Partition part = convertToPart(mpart);
    if ("TRUE".equalsIgnoreCase(mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
      String partName = Warehouse.makePartName(
          this.convertToFieldSchemas(mtbl.getPartitionKeys()), partVals);
      PrincipalPrivilegeSet partAuth = this.getPartitionPrivilegeSet(
          dbName, tblName, partName, user_name, group_names);
      part.setPrivileges(partAuth);
    }
    success = commitTransaction();
    return part;
  } finally {
    if (!success) {
      rollbackTransaction();
    }
  }
}
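The openTransaction/commitTransaction/rollbackTransaction pattern above recurs throughout ObjectStore: commit on success, and roll back in the finally block only if the commit never happened. A minimal sketch of the idiom, with a hypothetical TxnStore interface standing in for ObjectStore's own transaction methods:

import java.util.function.Supplier;

interface TxnStore {
  void openTransaction();
  boolean commitTransaction();
  void rollbackTransaction();
}

class TxnIdiom {
  // Runs body inside a transaction; if commitTransaction() was never reached
  // (an exception escaped) or returned false, the finally block rolls back.
  static <T> T inTransaction(TxnStore store, Supplier<T> body) {
    boolean success = false;
    try {
      store.openTransaction();
      T result = body.get();
      success = store.commitTransaction();
      return result;
    } finally {
      if (!success) {
        store.rollbackTransaction();
      }
    }
  }
}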
Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
The class ObjectStore, method addPartitions.
@Override
public boolean addPartitions(String dbName, String tblName, List<Partition> parts)
    throws InvalidObjectException, MetaException {
  boolean success = false;
  openTransaction();
  try {
    List<MTablePrivilege> tabGrants = null;
    List<MTableColumnPrivilege> tabColumnGrants = null;
    MTable table = this.getMTable(dbName, tblName);
    if ("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE"))) {
      tabGrants = this.listAllTableGrants(dbName, tblName);
      tabColumnGrants = this.listTableAllColumnGrants(dbName, tblName);
    }
    List<Object> toPersist = new ArrayList<Object>();
    for (Partition part : parts) {
      if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) {
        throw new MetaException("Partition does not belong to target table "
            + dbName + "." + tblName + ": " + part);
      }
      MPartition mpart = convertToMPart(part, true);
      toPersist.add(mpart);
      int now = (int) (System.currentTimeMillis() / 1000);
      if (tabGrants != null) {
        for (MTablePrivilege tab : tabGrants) {
          toPersist.add(new MPartitionPrivilege(tab.getPrincipalName(), tab.getPrincipalType(),
              mpart, tab.getPrivilege(), now, tab.getGrantor(), tab.getGrantorType(),
              tab.getGrantOption()));
        }
      }
      if (tabColumnGrants != null) {
        for (MTableColumnPrivilege col : tabColumnGrants) {
          toPersist.add(new MPartitionColumnPrivilege(col.getPrincipalName(),
              col.getPrincipalType(), mpart, col.getColumnName(), col.getPrivilege(), now,
              col.getGrantor(), col.getGrantorType(), col.getGrantOption()));
        }
      }
    }
    if (toPersist.size() > 0) {
      pm.makePersistentAll(toPersist);
    }
    success = commitTransaction();
  } finally {
    if (!success) {
      rollbackTransaction();
    }
  }
  return success;
}
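A hypothetical caller sketch: every Partition handed to addPartitions must name the same database and table as the explicit dbName/tblName arguments, or the consistency check at the top of the loop throws MetaException. The store, sd, and now variables are assumed to be in scope, as in the test at the top of this page; the names and values are illustrative only.

// Assumes 'store' (a RawStore), 'sd' (a StorageDescriptor template), and
// 'now' (epoch seconds) are in scope; "mydb"/"t0"/"emea" are made up.
StorageDescriptor psd = new StorageDescriptor(sd);
psd.setLocation("file:/tmp/region=emea");
Partition part = new Partition(Arrays.asList("emea"), "mydb", "t0", now, now, psd,
    new HashMap<String, String>());
// The explicit names must match part.getDbName() and part.getTableName().
boolean ok = store.addPartitions("mydb", "t0", Arrays.asList(part));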
Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
The class ObjectStore, method convertToParts.
private List<Partition> convertToParts(List<MPartition> src, List<Partition> dest)
    throws MetaException {
  if (src == null) {
    return dest;
  }
  if (dest == null) {
    dest = new ArrayList<Partition>(src.size());
  }
  for (MPartition mp : src) {
    dest.add(convertToPart(mp));
    Deadline.checkTimeout();
  }
  return dest;
}
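The null handling above (a null source returns dest unchanged; a null dest is allocated lazily at the source's size) is a small reusable convert-and-accumulate pattern. A generic sketch, minus the Hive-specific Deadline timeout check:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

class ConvertAll {
  // Converts each element of src with fn and appends it to dest, preserving
  // the same null semantics as convertToParts above.
  static <S, D> List<D> convert(List<S> src, List<D> dest, Function<S, D> fn) {
    if (src == null) {
      return dest;
    }
    if (dest == null) {
      dest = new ArrayList<>(src.size());
    }
    for (S s : src) {
      dest.add(fn.apply(s));
    }
    return dest;
  }
}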