Use of org.apache.hadoop.hive.metastore.api.PartitionSpec in project hive by apache.
The class MetadataJSONSerializer, method serializePartitionSpec:
@Override
@InterfaceAudience.LimitedPrivate({"Hive"})
@InterfaceStability.Evolving
public List<String> serializePartitionSpec(HCatPartitionSpec hcatPartitionSpec) throws HCatException {
  try {
    List<String> stringReps = new ArrayList<String>();
    TSerializer serializer = new TSerializer(new TJSONProtocol.Factory());
    // Serialize each underlying Thrift PartitionSpec to its JSON string form.
    for (PartitionSpec partitionSpec : hcatPartitionSpec.partitionSpecProxy.toPartitionSpec()) {
      stringReps.add(serializer.toString(partitionSpec));
    }
    return stringReps;
  } catch (TException serializationException) {
    throw new HCatException("Failed to serialize!", serializationException);
  }
}
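The inverse operation can be sketched with Thrift's TDeserializer and the same JSON protocol factory. This is a minimal illustration that mirrors the serializer above, not a reproduction of HCatalog's actual deserializer:

public List<PartitionSpec> deserializePartitionSpec(List<String> stringReps) throws HCatException {
  try {
    List<PartitionSpec> partitionSpecs = new ArrayList<>();
    TDeserializer deserializer = new TDeserializer(new TJSONProtocol.Factory());
    for (String stringRep : stringReps) {
      // Populate an empty Thrift struct from its JSON string form.
      PartitionSpec partitionSpec = new PartitionSpec();
      deserializer.fromString(partitionSpec, stringRep);
      partitionSpecs.add(partitionSpec);
    }
    return partitionSpecs;
  } catch (TException deserializationException) {
    throw new HCatException("Failed to deserialize!", deserializationException);
  }
}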
Use of org.apache.hadoop.hive.metastore.api.PartitionSpec in project hive by apache.
The class TestMetastoreExpr, method checkExpr:
public void checkExpr(int numParts, String dbName, String tblName, ExprNodeGenericFuncDesc expr, Table t) throws Exception {
  List<Partition> parts = new ArrayList<Partition>();
  client.listPartitionsByExpr(dbName, tblName, SerializationUtilities.serializeExpressionToKryo(expr), null, (short) -1, parts);
  assertEquals("Partition check failed: " + expr.getExprString(), numParts, parts.size());
  // check with partition spec as well
  PartitionsByExprRequest req = new PartitionsByExprRequest(dbName, tblName, ByteBuffer.wrap(SerializationUtilities.serializeExpressionToKryo(expr)));
  req.setMaxParts((short) -1);
  req.setId(t.getId());
  List<PartitionSpec> partSpec = new ArrayList<>();
  client.listPartitionsSpecByExpr(req, partSpec);
  int partSpecSize = 0;
  if (!partSpec.isEmpty()) {
    partSpecSize = partSpec.iterator().next().getSharedSDPartitionSpec().getPartitionsSize();
  }
  assertEquals("Partition Spec check failed: " + expr.getExprString(), numParts, partSpecSize);
}
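A hypothetical invocation of checkExpr, modeled on the ExprBuilder usage in the next snippet; the column name "p1", value "p11", and expected count are placeholders, and dbName, tblName, and t are assumed from the test setup:

// Build a single equality predicate with the test's fluent ExprBuilder (illustrative values).
ExprBuilder e = new ExprBuilder(tblName);
checkExpr(2, dbName, tblName, e.strCol("p1").val("p11").pred("=", 2).build(), t);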
Use of org.apache.hadoop.hive.metastore.api.PartitionSpec in project hive by apache.
The class TestSessionHiveMetastoreClientListPartitionsTempTable, method testListPartitionsSpecByExprDefMaxParts:
@Test
public void testListPartitionsSpecByExprDefMaxParts() throws Exception {
  Table t = createTable4PartColsParts(getClient()).table;
  TestMetastoreExpr.ExprBuilder e = new TestMetastoreExpr.ExprBuilder(TABLE_NAME);
  List<PartitionSpec> result = new ArrayList<>();
  PartitionsByExprRequest req = new PartitionsByExprRequest(DB_NAME, TABLE_NAME, ByteBuffer.wrap(SerializationUtilities.serializeExpressionToKryo(e.strCol("yyyy").val("2017").pred(">=", 2).build())));
  req.setMaxParts((short) 3);
  req.setId(t.getId());
  getClient().listPartitionsSpecByExpr(req, result);
  assertEquals(3, result.iterator().next().getSharedSDPartitionSpec().getPartitionsSize());
}
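The capped result can also be flattened for finer-grained assertions. A small illustrative loop (not part of the test) over the shared-SD partitions returned above:

// Collect the partition value lists from every returned spec.
List<List<String>> partValues = new ArrayList<>();
for (PartitionSpec spec : result) {
  for (PartitionWithoutSD p : spec.getSharedSDPartitionSpec().getPartitions()) {
    partValues.add(p.getValues());
  }
}
assertEquals(3, partValues.size());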
Use of org.apache.hadoop.hive.metastore.api.PartitionSpec in project hive by apache.
The class TestSessionHiveMetastoreClientListPartitionsTempTable, method checkExprPartitionSpec:
private void checkExprPartitionSpec(int numParts, ExprNodeGenericFuncDesc expr, Table t) throws Exception {
  List<Partition> parts = new ArrayList<>();
  getClient().listPartitionsByExpr(DB_NAME, TABLE_NAME, SerializationUtilities.serializeExpressionToKryo(expr), null, (short) -1, parts);
  assertEquals("Partition check failed: " + expr.getExprString(), numParts, parts.size());
  // check with partition spec as well
  PartitionsByExprRequest req = new PartitionsByExprRequest(DB_NAME, TABLE_NAME, ByteBuffer.wrap(SerializationUtilities.serializeExpressionToKryo(expr)));
  req.setMaxParts((short) -1);
  req.setId(t.getId());
  List<PartitionSpec> partSpec = new ArrayList<>();
  getClient().listPartitionsSpecByExpr(req, partSpec);
  int partSpecSize = 0;
  if (!partSpec.isEmpty()) {
    partSpecSize = partSpec.iterator().next().getSharedSDPartitionSpec().getPartitionsSize();
  }
  assertEquals("Partition Spec check failed: " + expr.getExprString(), numParts, partSpecSize);
}
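The check above counts only the first spec's shared-SD partitions. When results may span several specs or mix shared-SD and full-partition representations, PartitionSpecProxy offers a uniform view; a minimal sketch, assuming the Factory helper from the metastore API:

// Wrap the returned specs and count all partitions across both representations.
PartitionSpecProxy proxy = PartitionSpecProxy.Factory.get(partSpec);
int totalParts = proxy.size();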
Use of org.apache.hadoop.hive.metastore.api.PartitionSpec in project hive by apache.
The class Hive, method convertFromPartSpec:
// This method converts PartitionSpec to Partition.
// This is required because listPartitionsSpecByExpr returns a set of PartitionSpec objects,
// but Hive requires Partition objects.
private static List<Partition> convertFromPartSpec(Iterator<PartitionSpec> iterator, Table tbl) throws HiveException, TException {
  if (!iterator.hasNext()) {
    return Collections.emptyList();
  }
  List<Partition> results = new ArrayList<>();
  while (iterator.hasNext()) {
    PartitionSpec partitionSpec = iterator.next();
    if (partitionSpec.getPartitionList() != null) {
      // Partitions outside the table location carry their own storage descriptors.
      Iterator<org.apache.hadoop.hive.metastore.api.Partition> externalPartItr = partitionSpec.getPartitionList().getPartitions().iterator();
      while (externalPartItr.hasNext()) {
        org.apache.hadoop.hive.metastore.api.Partition msPart = externalPartItr.next();
        results.add(new Partition(tbl, msPart));
      }
    } else {
      // Partitions within the table location share a single storage descriptor.
      for (PartitionWithoutSD partitionWithoutSD : partitionSpec.getSharedSDPartitionSpec().getPartitions()) {
        org.apache.hadoop.hive.metastore.api.Partition part = new org.apache.hadoop.hive.metastore.api.Partition();
        part.setTableName(partitionSpec.getTableName());
        part.setDbName(partitionSpec.getDbName());
        part.setCatName(partitionSpec.getCatName());
        part.setCreateTime(partitionWithoutSD.getCreateTime());
        part.setLastAccessTime(partitionWithoutSD.getLastAccessTime());
        part.setParameters(partitionWithoutSD.getParameters());
        part.setPrivileges(partitionWithoutSD.getPrivileges());
        part.setSd(partitionSpec.getSharedSDPartitionSpec().getSd().deepCopy());
        String partitionLocation = null;
        if (partitionWithoutSD.getRelativePath() == null || partitionWithoutSD.getRelativePath().isEmpty()) {
          // No relative path: derive the location from the table's data location.
          if (tbl.getDataLocation() != null) {
            Path partPath = new Path(tbl.getDataLocation(), Warehouse.makePartName(tbl.getPartCols(), partitionWithoutSD.getValues()));
            partitionLocation = partPath.toString();
          }
        } else {
          partitionLocation = tbl.getSd().getLocation();
          partitionLocation += partitionWithoutSD.getRelativePath();
        }
        part.getSd().setLocation(partitionLocation);
        part.setValues(partitionWithoutSD.getValues());
        part.setWriteId(partitionSpec.getWriteId());
        Partition hivePart = new Partition(tbl, part);
        results.add(hivePart);
      }
    }
  }
  return results;
}
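A hypothetical call site, tying this back to the listPartitionsSpecByExpr handling shown in the earlier snippets; req and tbl are assumed to be prepared the same way, and msc stands in for a metastore client obtained elsewhere (e.g. via getMSC()):

// Fetch specs from the metastore, then convert them to ql-layer Partition objects.
List<PartitionSpec> specs = new ArrayList<>();
msc.listPartitionsSpecByExpr(req, specs);
List<Partition> partitions = convertFromPartSpec(specs.iterator(), tbl);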