Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
The class HBaseStore, method getPartitions.
@Override
public List<Partition> getPartitions(String dbName, String tableName, int max)
    throws MetaException, NoSuchObjectException {
  boolean commit = false;
  openTransaction();
  try {
    // Identifiers are normalized before scanning the HBase-backed partition table.
    List<Partition> parts = getHBase().scanPartitionsInTable(
        HiveStringUtils.normalizeIdentifier(dbName),
        HiveStringUtils.normalizeIdentifier(tableName), max);
    commit = true;
    return parts;
  } catch (IOException e) {
    LOG.error("Unable to get partitions", e);
    throw new MetaException("Error scanning partitions");
  } finally {
    // Commits if the scan succeeded, rolls back otherwise.
    commitOrRoleBack(commit);
  }
}
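A minimal caller-side sketch of this method, assuming an already-initialized RawStore (for example, an HBaseStore wired up by the metastore); the database and table names and the helper itself are placeholders:

// Hypothetical helper: print the location of up to 100 partitions of default.web_logs.
static void printPartitionLocations(RawStore store)
    throws MetaException, NoSuchObjectException {
  List<Partition> parts = store.getPartitions("default", "web_logs", 100);
  for (Partition p : parts) {
    System.out.println(p.getSd().getLocation());
  }
}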
Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
The class HBaseStore, method listPartitionNames.
@Override
public List<String> listPartitionNames(String db_name, String tbl_name, short max_parts)
    throws MetaException {
  boolean commit = false;
  openTransaction();
  try {
    List<Partition> parts = getHBase().scanPartitionsInTable(
        HiveStringUtils.normalizeIdentifier(db_name),
        HiveStringUtils.normalizeIdentifier(tbl_name), max_parts);
    if (parts == null) return null;
    List<String> names = new ArrayList<String>(parts.size());
    // The table is needed to map each partition's values back to key=value names.
    Table table = getHBase().getTable(
        HiveStringUtils.normalizeIdentifier(db_name),
        HiveStringUtils.normalizeIdentifier(tbl_name));
    for (Partition p : parts) {
      names.add(buildExternalPartName(table, p));
    }
    commit = true;
    return names;
  } catch (IOException e) {
    LOG.error("Unable to get partitions", e);
    throw new MetaException("Error scanning partitions");
  } finally {
    commitOrRoleBack(commit);
  }
}
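A short usage sketch, again assuming an initialized RawStore and placeholder identifiers; by metastore convention a negative max_parts means no limit:

// Hypothetical: list every partition name of default.web_logs, e.g. "ds=2017-01-01/hr=00".
List<String> names = store.listPartitionNames("default", "web_logs", (short) -1);
for (String n : names) {
  System.out.println(n);
}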
Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
The class HBaseReadWrite, method getPartition.
private Partition getPartition(String dbName, String tableName, List<String> partVals,
    boolean populateCache) throws IOException {
  // Check the partition cache first to avoid an HBase round trip.
  Partition cached = partCache.get(dbName, tableName, partVals);
  if (cached != null) return cached;
  byte[] key = HBaseUtils.buildPartitionKey(dbName, tableName,
      HBaseUtils.getPartitionKeyTypes(getTable(dbName, tableName).getPartitionKeys()), partVals);
  byte[] serialized = read(PART_TABLE, key, CATALOG_CF, CATALOG_COL);
  if (serialized == null) return null;
  // The storage descriptor is stored separately, keyed by hash; reassemble it here.
  HBaseUtils.StorageDescriptorParts sdParts =
      HBaseUtils.deserializePartition(dbName, tableName, partVals, serialized);
  StorageDescriptor sd = getStorageDescriptor(sdParts.sdHash);
  HBaseUtils.assembleStorageDescriptor(sd, sdParts);
  if (populateCache) partCache.put(dbName, tableName, sdParts.containingPartition);
  return sdParts.containingPartition;
}
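This private method is reached through the class's public getPartition(dbName, tableName, partVals) overload, which delegates here with populateCache set to true. A sketch assuming an initialized HBaseReadWrite instance and placeholder partition values:

// Hypothetical lookup of a single partition keyed by its values (ds=2017-01-01, hr=00).
List<String> partVals = Arrays.asList("2017-01-01", "00");
Partition p = hbaseReadWrite.getPartition("default", "web_logs", partVals);
if (p != null) {
  System.out.println(p.getSd().getLocation());
}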
Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
The class HBaseReadWrite, method putPartitions.
/**
 * Add a group of partitions. This should only be used when all of the partitions are new;
 * it blindly increments the reference count on each partition's storage descriptor.
 * @param partitions list of partitions to add
 * @throws IOException if the write to HBase fails
 */
void putPartitions(List<Partition> partitions) throws IOException {
  List<Put> puts = new ArrayList<>(partitions.size());
  for (Partition partition : partitions) {
    // Store the storage descriptor separately; only its hash travels with the partition.
    byte[] hash = putStorageDescriptor(partition.getSd());
    List<String> partTypes = HBaseUtils.getPartitionKeyTypes(
        getTable(partition.getDbName(), partition.getTableName()).getPartitionKeys());
    // serialized[0] is the row key, serialized[1] the serialized partition body.
    byte[][] serialized = HBaseUtils.serializePartition(partition, partTypes, hash);
    Put p = new Put(serialized[0]);
    p.add(CATALOG_CF, CATALOG_COL, serialized[1]);
    puts.add(p);
    partCache.put(partition.getDbName(), partition.getTableName(), partition);
  }
  // Write all partitions in a single batch, then flush the connection.
  HTableInterface htab = conn.getHBaseTable(PART_TABLE);
  htab.put(puts);
  conn.flush(htab);
}
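A construction sketch showing what a caller might pass in, assuming a Thrift Partition assembled by hand and a storage descriptor cloned from the table's; all names below are placeholders:

// Hypothetical: build one new partition and write it in a single batch.
Partition part = new Partition();
part.setDbName("default");
part.setTableName("web_logs");
part.setValues(Arrays.asList("2017-01-01"));
part.setSd(sdClonedFromTable); // assumed: a StorageDescriptor copied from the table
part.setParameters(new HashMap<String, String>());
hbaseReadWrite.putPartitions(Collections.singletonList(part));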
Use of org.apache.hadoop.hive.metastore.api.Partition in project hive by apache.
The class HiveAlterHandler, method alterPartitions.
@Override
public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String dbname,
    final String name, final List<Partition> new_parts, EnvironmentContext environmentContext,
    HMSHandler handler)
    throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
  List<Partition> oldParts = new ArrayList<Partition>();
  List<List<String>> partValsList = new ArrayList<List<String>>();
  List<MetaStoreEventListener> transactionalListeners = null;
  if (handler != null) {
    transactionalListeners = handler.getTransactionalListeners();
  }
  Table tbl = msdb.getTable(dbname, name);
  if (tbl == null) {
    throw new InvalidObjectException(
        "Unable to alter partitions because table or database does not exist.");
  }
  boolean success = false;
  try {
    msdb.openTransaction();
    for (Partition tmpPart : new_parts) {
      // Set DDL time to now if not specified.
      if (tmpPart.getParameters() == null
          || tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME) == null
          || Integer.parseInt(tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
        tmpPart.putToParameters(hive_metastoreConstants.DDL_TIME,
            Long.toString(System.currentTimeMillis() / 1000));
      }
      Partition oldTmpPart = msdb.getPartition(dbname, name, tmpPart.getValues());
      oldParts.add(oldTmpPart);
      partValsList.add(tmpPart.getValues());
      if (MetaStoreUtils.requireCalStats(hiveConf, oldTmpPart, tmpPart, tbl, environmentContext)) {
        // If the fast stats are unchanged, just update the basic state instead of recomputing.
        if (MetaStoreUtils.isFastStatsSame(oldTmpPart, tmpPart)) {
          MetaStoreUtils.updateBasicState(environmentContext, tmpPart.getParameters());
        } else {
          MetaStoreUtils.updatePartitionStatsFast(tmpPart, wh, false, true, environmentContext);
        }
      }
      updatePartColumnStats(msdb, dbname, name, oldTmpPart.getValues(), tmpPart);
    }
    msdb.alterPartitions(dbname, name, partValsList, new_parts);
    // Fire the transactional listeners inside the transaction, pairing old and new partitions.
    Iterator<Partition> oldPartsIt = oldParts.iterator();
    for (Partition newPart : new_parts) {
      Partition oldPart;
      if (oldPartsIt.hasNext()) {
        oldPart = oldPartsIt.next();
      } else {
        throw new InvalidOperationException("Missing old partition corresponding to new partition "
            + "when invoking MetaStoreEventListener for alterPartitions event.");
      }
      if (transactionalListeners != null && transactionalListeners.size() > 0) {
        AlterPartitionEvent alterPartitionEvent =
            new AlterPartitionEvent(oldPart, newPart, tbl, true, handler);
        for (MetaStoreEventListener transactionalListener : transactionalListeners) {
          transactionalListener.onAlterPartition(alterPartitionEvent);
        }
      }
    }
    success = msdb.commitTransaction();
  } catch (InvalidObjectException | NoSuchObjectException e) {
    throw new InvalidOperationException("Alter partition operation failed: " + e);
  } finally {
    if (!success) {
      msdb.rollbackTransaction();
    }
  }
  return oldParts;
}
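From the client side, this handler is reached through the metastore's alter_partitions call. A sketch of that end-to-end path, assuming a reachable metastore configured via HiveConf and placeholder table names:

// Hypothetical: bump a parameter on every partition and push the batch back,
// which eventually invokes HiveAlterHandler.alterPartitions on the server.
IMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
List<Partition> parts = client.listPartitions("default", "web_logs", (short) -1);
for (Partition p : parts) {
  p.putToParameters("last_touched_by", "example");
}
client.alter_partitions("default", "web_logs", parts);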