Example usage of org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest in the Apache Hive project:
class Hive, method buildPartitionByExprRequest.
/**
 * Builds a {@link PartitionsByExprRequest} for the given table and serialized
 * partition predicate.
 *
 * @param tbl table whose partitions are being requested
 * @param exprBytes Kryo-serialized partition-predicate expression
 * @param defaultPartitionName name used for the default partition; not set when null
 * @param conf Hive configuration, used to resolve the default catalog name
 * @param validWriteIdList valid write-id list as a string (may be null)
 * @return the populated request
 */
private PartitionsByExprRequest buildPartitionByExprRequest(Table tbl, byte[] exprBytes, String defaultPartitionName, HiveConf conf, String validWriteIdList) {
PartitionsByExprRequest request = new PartitionsByExprRequest(tbl.getDbName(), tbl.getTableName(), ByteBuffer.wrap(exprBytes));
if (defaultPartitionName != null) {
request.setDefaultPartitionName(defaultPartitionName);
}
request.setCatName(getDefaultCatalog(conf));
request.setValidWriteIdList(validWriteIdList);
request.setId(tbl.getTTable().getId());
return request;
}
Example usage of org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest in the Apache Hive project:
class Hive, method getPartitionsByExpr.
/**
 * Get a list of Partitions by expr.
 * @param tbl The table containing the partitions.
 * @param expr A serialized expression for partition predicates.
 * @param conf Hive config.
 * @param partitions the resulting list of partitions
 * @return whether the resulting list contains partitions which may or may not match the expr
 */
public boolean getPartitionsByExpr(Table tbl, ExprNodeGenericFuncDesc expr, HiveConf conf, List<Partition> partitions) throws HiveException, TException {
PerfLogger perfLogger = SessionState.getPerfLogger();
perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS_BY_EXPR);
try {
Preconditions.checkNotNull(partitions);
byte[] exprBytes = SerializationUtilities.serializeExpressionToKryo(expr);
String defaultPartitionName = HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME);
// Resolve the valid write-id list up front (transactional tables only) so the
// request builder can set it once; the builder also sets the table id, so the
// previous post-hoc setValidWriteIdList/setId overrides were redundant.
String writeIdListStr = null;
if (AcidUtils.isTransactionalTable(tbl)) {
ValidWriteIdList validWriteIdList = getValidWriteIdList(tbl.getDbName(), tbl.getTableName());
writeIdListStr = validWriteIdList != null ? validWriteIdList.toString() : null;
}
PartitionsByExprRequest req = buildPartitionByExprRequest(tbl, exprBytes, defaultPartitionName, conf, writeIdListStr);
List<org.apache.hadoop.hive.metastore.api.PartitionSpec> msParts = new ArrayList<>();
boolean hasUnknownParts = getMSC().listPartitionsSpecByExpr(req, msParts);
partitions.addAll(convertFromPartSpec(msParts.iterator(), tbl));
return hasUnknownParts;
} finally {
perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS_BY_EXPR, "HS2-cache");
}
}
Example usage of org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest in the Apache Hive project:
class Hive, method getPartitionNames.
/**
 * Lists partition names for a table, optionally filtered by a predicate expression.
 *
 * @param tbl table to list partitions of
 * @param expr partition predicate; may be null to list all partitions
 * @param order requested ordering of the returned names
 * @param maxParts maximum number of names to return; negative means no limit
 * @return the partition names, or an empty list when the table/partitions do not exist
 * @throws HiveException wrapping any other metastore failure
 */
public List<String> getPartitionNames(Table tbl, ExprNodeGenericFuncDesc expr, String order, short maxParts) throws HiveException {
// Thrift requires a non-null expression payload; use a one-byte sentinel when absent.
byte[] serializedExpr = (expr == null) ? new byte[] { (byte) -1 } : SerializationUtilities.serializeExpressionToKryo(expr);
try {
PartitionsByExprRequest req = new PartitionsByExprRequest(tbl.getDbName(), tbl.getTableName(), ByteBuffer.wrap(serializedExpr));
String defaultPartName = HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME);
if (defaultPartName != null) {
req.setDefaultPartitionName(defaultPartName);
}
if (maxParts >= 0) {
req.setMaxParts(maxParts);
}
req.setOrder(order);
req.setCatName(tbl.getCatalogName());
if (AcidUtils.isTransactionalTable(tbl)) {
ValidWriteIdList writeIds = getValidWriteIdList(tbl.getDbName(), tbl.getTableName());
req.setValidWriteIdList(writeIds == null ? null : writeIds.toString());
req.setId(tbl.getTTable().getId());
}
return getMSC().listPartitionNames(req);
} catch (NoSuchObjectException nsoe) {
// Missing table/partitions is treated as "no names", not an error.
return Lists.newArrayList();
} catch (Exception e) {
LOG.error("Failed getPartitionNames", e);
throw new HiveException(e);
}
}
Example usage of org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest in the Netflix metacat project:
class CatalogThriftHiveMetastore, method drop_partitions_req.
/**
* {@inheritDoc}
*/
@Override
public DropPartitionsResult drop_partitions_req(final DropPartitionsRequest request) throws TException {
return requestWrapper("drop_partitions_req", new Object[] { request }, () -> {
final String databaseName = request.getDbName();
final String tableName = request.getTblName();
// ifExists: caller tolerates some requested partitions being absent.
final boolean ifExists = request.isSetIfExists() && request.isIfExists();
// needResult defaults to true when unset.
final boolean needResult = !request.isSetNeedResult() || request.isNeedResult();
final List<Partition> parts = Lists.newArrayList();
final List<String> partNames = Lists.newArrayList();
// Minimum number of partitions that must be found, unless ifExists is set.
int minCount = 0;
final RequestPartsSpec spec = request.getParts();
if (spec.isSetExprs()) {
final Table table = get_table(databaseName, tableName);
// Dropping by expressions.
for (DropPartitionsExpr expr : spec.getExprs()) {
// At least one partition per expression, if not ifExists
++minCount;
final PartitionsByExprResult partitionsByExprResult = get_partitions_by_expr(new PartitionsByExprRequest(databaseName, tableName, expr.bufferForExpr()));
if (partitionsByExprResult.isHasUnknownPartitions()) {
// Expr is built by DDLSA, it should only contain part cols and simple ops
throw new MetaException("Unexpected unknown partitions to drop");
}
parts.addAll(partitionsByExprResult.getPartitions());
}
// Convert the matched partitions back into partition names for deletion.
final List<String> colNames = new ArrayList<>(table.getPartitionKeys().size());
for (FieldSchema col : table.getPartitionKeys()) {
colNames.add(col.getName());
}
// NOTE(review): when the table has no partition keys, partNames stays empty and
// the deletePartitions call below becomes a no-op — confirm this is intended.
if (!colNames.isEmpty()) {
parts.forEach(partition -> partNames.add(FileUtils.makePartName(colNames, partition.getValues())));
}
} else if (spec.isSetNames()) {
// Dropping by explicit partition names: every named partition must exist.
partNames.addAll(spec.getNames());
minCount = partNames.size();
parts.addAll(get_partitions_by_names(databaseName, tableName, partNames));
} else {
throw new MetaException("Partition spec is not set");
}
if ((parts.size() < minCount) && !ifExists) {
throw new NoSuchObjectException("Some partitions to drop are missing");
}
// Perform the actual deletion via the v1 partition service.
partV1.deletePartitions(catalogName, databaseName, tableName, partNames);
final DropPartitionsResult result = new DropPartitionsResult();
if (needResult) {
result.setPartitions(parts);
}
return result;
});
}
Example usage of org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest in the Apache Hive project:
class TestSessionHiveMetastoreClientListPartitionsTempTable, method checkPartitionNames.
/**
 * Lists partition names through a {@link PartitionsByExprRequest} and asserts
 * they match the expected names.
 *
 * @param expected expected partition names, in order
 * @param numParts max number of partitions to request
 * @param order requested ordering
 * @param defaultPartName default partition name to put on the request
 * @param expr predicate expression; may be null to match all partitions
 * @param t table providing the id for the request
 */
private void checkPartitionNames(List<String> expected, short numParts, String order, String defaultPartName, ExprNodeGenericFuncDesc expr, Table t) throws Exception {
// Thrift requires a non-null expr; use a one-byte sentinel when none is given.
byte[] serialized = (expr == null) ? new byte[] { (byte) -1 } : SerializationUtilities.serializeExpressionToKryo(expr);
PartitionsByExprRequest request = new PartitionsByExprRequest();
request.setDbName(DB_NAME);
request.setTblName(TABLE_NAME);
request.setExpr(serialized);
request.setMaxParts(numParts);
request.setOrder(order);
request.setDefaultPartitionName(defaultPartName);
request.setId(t.getId());
List<String> actual = getClient().listPartitionNames(request);
assertArrayEquals(expected.toArray(), actual.toArray());
}
Aggregations