Use of org.apache.hadoop.hive.metastore.api.PartitionsByExprResult in project hive by apache.
Class SessionHiveMetaStoreClient, method getPartitionsByExprInternal:
@Override
protected PartitionsByExprResult getPartitionsByExprInternal(PartitionsByExprRequest req) throws TException {
  Map<Object, Object> queryCache = getQueryCache();
  if (queryCache != null) {
    // Retrieve or populate cache
    CacheKey cacheKey = new CacheKey(KeyType.PARTITIONS_BY_EXPR, req);
    PartitionsByExprResult v = (PartitionsByExprResult) queryCache.get(cacheKey);
    if (v == null) {
      v = super.getPartitionsByExprInternal(req);
      queryCache.put(cacheKey, v);
    } else {
      LOG.debug("Query level HMS cache: method=getPartitionsByExprInternal, dbName={}, tblName={}",
          req.getDbName(), req.getTblName());
    }
    return v;
  }
  return super.getPartitionsByExprInternal(req);
}
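The pattern above is a query-scoped cache-aside lookup: check the per-query cache, fall back to the remote metastore call on a miss, and store the result for later identical requests within the same query. A minimal, self-contained sketch of that lookup-or-populate flow follows; the QueryScopedCache class and its method names are illustrative only, not part of the Hive API.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

// Illustrative lookup-or-populate helper; class and method names are hypothetical.
public final class QueryScopedCache<K, V> {
  private final Map<K, V> cache = new ConcurrentHashMap<>();

  /** Returns the cached value for key, loading and storing it on a miss. */
  public V getOrLoad(K key, Function<K, V> loader) {
    V value = cache.get(key);
    if (value == null) {
      value = loader.apply(key); // e.g. the call to super.getPartitionsByExprInternal(req)
      cache.put(key, value);
    }
    return value;
  }
}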
Use of org.apache.hadoop.hive.metastore.api.PartitionsByExprResult in project metacat by Netflix.
Class CatalogThriftHiveMetastore, method drop_partitions_req:
/**
 * {@inheritDoc}
 */
@Override
public DropPartitionsResult drop_partitions_req(final DropPartitionsRequest request) throws TException {
    return requestWrapper("drop_partitions_req", new Object[] {request}, () -> {
        final String databaseName = request.getDbName();
        final String tableName = request.getTblName();
        final boolean ifExists = request.isSetIfExists() && request.isIfExists();
        final boolean needResult = !request.isSetNeedResult() || request.isNeedResult();
        final List<Partition> parts = Lists.newArrayList();
        final List<String> partNames = Lists.newArrayList();
        int minCount = 0;
        final RequestPartsSpec spec = request.getParts();
        if (spec.isSetExprs()) {
            final Table table = get_table(databaseName, tableName);
            // Dropping by expressions.
            for (DropPartitionsExpr expr : spec.getExprs()) {
                // At least one partition per expression, if not ifExists
                ++minCount;
                final PartitionsByExprResult partitionsByExprResult = get_partitions_by_expr(
                    new PartitionsByExprRequest(databaseName, tableName, expr.bufferForExpr()));
                if (partitionsByExprResult.isHasUnknownPartitions()) {
                    // Expr is built by DDLSA, it should only contain part cols and simple ops
                    throw new MetaException("Unexpected unknown partitions to drop");
                }
                parts.addAll(partitionsByExprResult.getPartitions());
            }
            final List<String> colNames = new ArrayList<>(table.getPartitionKeys().size());
            for (FieldSchema col : table.getPartitionKeys()) {
                colNames.add(col.getName());
            }
            if (!colNames.isEmpty()) {
                parts.forEach(partition -> partNames.add(FileUtils.makePartName(colNames, partition.getValues())));
            }
        } else if (spec.isSetNames()) {
            partNames.addAll(spec.getNames());
            minCount = partNames.size();
            parts.addAll(get_partitions_by_names(databaseName, tableName, partNames));
        } else {
            throw new MetaException("Partition spec is not set");
        }
        if ((parts.size() < minCount) && !ifExists) {
            throw new NoSuchObjectException("Some partitions to drop are missing");
        }
        partV1.deletePartitions(catalogName, databaseName, tableName, partNames);
        final DropPartitionsResult result = new DropPartitionsResult();
        if (needResult) {
            result.setPartitions(parts);
        }
        return result;
    });
}
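For context, a client dropping partitions by name builds the Thrift request that this handler consumes. The sketch below assumes the standard Thrift-generated DropPartitionsRequest constructor and the RequestPartsSpec union factory methods; the handler variable and the database, table, and partition names are placeholders.

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest;
import org.apache.hadoop.hive.metastore.api.DropPartitionsResult;
import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.TException;

public final class DropPartitionsExample {
    // "handler" stands in for any ThriftHiveMetastore.Iface implementation, such as the
    // CatalogThriftHiveMetastore shown above; db, table, and partition names are placeholders.
    static DropPartitionsResult dropByNames(final ThriftHiveMetastore.Iface handler) throws TException {
        final RequestPartsSpec spec = RequestPartsSpec.names(
            Arrays.asList("dateint=20230101/hour=0", "dateint=20230101/hour=1"));
        final DropPartitionsRequest request = new DropPartitionsRequest("mydb", "mytable", spec);
        request.setIfExists(true);   // tolerate partition names that are already gone
        request.setNeedResult(true); // ask the handler to return the dropped Partition objects
        return handler.drop_partitions_req(request);
    }
}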
Use of org.apache.hadoop.hive.metastore.api.PartitionsByExprResult in project hive by apache.
Class HiveMetaStoreClientWithLocalCache, method getPartitionsByExprInternal:
@Override
protected PartitionsByExprResult getPartitionsByExprInternal(PartitionsByExprRequest req) throws TException {
  if (isCacheEnabledAndInitialized()) {
    // table should be transactional to get responses from the cache
    TableWatermark watermark = new TableWatermark(
        req.getValidWriteIdList(), getTable(req.getDbName(), req.getTblName()).getId());
    if (watermark.isValid()) {
      CacheKey cacheKey = new CacheKey(KeyType.PARTITIONS_BY_EXPR, watermark, req);
      PartitionsByExprResult r = (PartitionsByExprResult) mscLocalCache.getIfPresent(cacheKey);
      if (r == null) {
        r = super.getPartitionsByExprInternal(req);
        mscLocalCache.put(cacheKey, r);
      } else {
        LOG.debug("HS2 level HMS cache: method=getPartitionsByExprInternal, dbName={}, tblName={}",
            req.getDbName(), req.getTblName());
      }
      if (LOG.isDebugEnabled() && recordStats) {
        LOG.debug(cacheObjName + ": " + mscLocalCache.stats().toString());
      }
      return r;
    }
  }
  return super.getPartitionsByExprInternal(req);
}
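Compared with the query-level cache above, this HS2-level cache keys entries by a TableWatermark built from the table's valid write-ID list and table id, so a committed write to a transactional table yields a new key and stale entries are simply never hit again. Below is a minimal sketch of a watermark-keyed cache, assuming a Caffeine cache similar to mscLocalCache; the WatermarkKey record and method names are illustrative, not the Hive implementation.

import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;
import java.util.function.Supplier;

// Illustrative watermark-keyed cache: the key embeds the table's write-id state,
// so any committed write produces a new key and stale entries are never returned.
public final class WatermarkedCache<V> {
  // Placeholder key type; Hive's CacheKey plus TableWatermark play this role.
  record WatermarkKey(String dbName, String tblName, String validWriteIdList, long tableId) { }

  private final Cache<WatermarkKey, V> cache = Caffeine.newBuilder()
      .maximumSize(10_000)
      .recordStats()
      .build();

  public V getOrLoad(WatermarkKey key, Supplier<V> loadFromMetastore) {
    V value = cache.getIfPresent(key);
    if (value == null) {
      value = loadFromMetastore.get(); // miss: fetch from the remote metastore
      cache.put(key, value);
    }
    return value;
  }

  public String statsString() {
    return cache.stats().toString(); // hit/miss counters, as logged above when stats are recorded
  }
}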