Use of org.apache.hadoop.hive.metastore.api.GetPartitionsRequest in project hive by apache: class TestGetPartitionsUsingProjectionAndFilterSpecs, method getGetPartitionsRequest.
/**
 * Builds a fresh {@link GetPartitionsRequest} pre-populated with the table
 * coordinates used by every test in this class, an empty projection spec and
 * an empty filter spec. Individual tests customize the specs afterwards.
 */
private GetPartitionsRequest getGetPartitionsRequest() {
    GetPartitionsRequest req = new GetPartitionsRequest();
    // Setters are independent of one another; order is irrelevant.
    req.setDbName(dbName);
    req.setTblName(tblName);
    req.setProjectionSpec(new GetProjectionsSpec());
    req.setFilterSpec(new GetPartitionsFilterSpec());
    return req;
}
Use of org.apache.hadoop.hive.metastore.api.GetPartitionsRequest in project hive by apache: class TestGetPartitionsUsingProjectionAndFilterSpecs, method testProjectionUsingJDO.
/**
 * Verifies partition projection through the JDO (ORM) code path by disabling
 * direct SQL, first with a single projected field and then with the full set
 * of single-valued fields.
 *
 * Fix: JUnit's {@code Assert.assertEquals} takes {@code (expected, actual)};
 * the original calls passed them in reverse order, which produces misleading
 * failure messages. Expected ({@code origPartitions.size()}) now comes first.
 */
@Test
public void testProjectionUsingJDO() throws Throwable {
    // disable direct SQL to make sure the JDO path is exercised
    client.setMetaConf(ConfVars.TRY_DIRECT_SQL.getVarname(), "false");

    // Case 1: project a single nested single-valued field.
    GetPartitionsRequest request = getGetPartitionsRequest();
    GetProjectionsSpec projectSpec = request.getProjectionSpec();
    List<String> projectedFields = Collections.singletonList("sd.location");
    projectSpec.setFieldList(projectedFields);

    GetPartitionsResponse response = client.getPartitionsWithSpecs(request);
    Assert.assertEquals(1, response.getPartitionSpec().size());
    PartitionSpec partitionSpec = response.getPartitionSpec().get(0);
    Assert.assertTrue("DbName is not set", partitionSpec.isSetDbName());
    Assert.assertTrue("tableName is not set", partitionSpec.isSetTableName());
    PartitionSpecWithSharedSD partitionSpecWithSharedSD = partitionSpec.getSharedSDPartitionSpec();
    StorageDescriptor sharedSD = partitionSpecWithSharedSD.getSd();
    Assert.assertNotNull(sharedSD);
    List<PartitionWithoutSD> partitionWithoutSDS = partitionSpecWithSharedSD.getPartitions();
    Assert.assertNotNull(partitionWithoutSDS);
    // expected first, actual second
    Assert.assertEquals(origPartitions.size(), partitionWithoutSDS.size());
    comparePartitionForSingleValuedFields(projectedFields, sharedSD, partitionWithoutSDS, 0);

    // Case 2: set all the single-valued fields and try using JDO
    request = getGetPartitionsRequest();
    projectSpec = request.getProjectionSpec();
    projectedFields = Arrays.asList("dbName", "tableName", "createTime", "lastAccessTime", "sd.location", "sd.inputFormat", "sd.outputFormat", "sd.compressed", "sd.numBuckets", "sd.serdeInfo.name", "sd.serdeInfo.serializationLib", "sd.serdeInfo.serdeType", "sd.serdeInfo.serializerClass", "sd.serdeInfo.deserializerClass");
    projectSpec.setFieldList(projectedFields);

    response = client.getPartitionsWithSpecs(request);
    Assert.assertEquals(1, response.getPartitionSpec().size());
    partitionSpec = response.getPartitionSpec().get(0);
    Assert.assertTrue("DbName is not set", partitionSpec.isSetDbName());
    Assert.assertTrue("tableName is not set", partitionSpec.isSetTableName());
    partitionSpecWithSharedSD = partitionSpec.getSharedSDPartitionSpec();
    sharedSD = partitionSpecWithSharedSD.getSd();
    Assert.assertNotNull(sharedSD);
    partitionWithoutSDS = partitionSpecWithSharedSD.getPartitions();
    Assert.assertNotNull(partitionWithoutSDS);
    // expected first, actual second
    Assert.assertEquals(origPartitions.size(), partitionWithoutSDS.size());
    comparePartitionForSingleValuedFields(projectedFields, sharedSD, partitionWithoutSDS, 0);
}
Use of org.apache.hadoop.hive.metastore.api.GetPartitionsRequest in project hive by apache: class TestGetPartitionsUsingProjectionAndFilterSpecs, method testPartitionProjectionExcludeParameters.
/**
 * Verifies that parameter keys matching the exclude pattern
 * ({@code EXCLUDE_KEY_PREFIX + "%"}) are stripped from the parameters of
 * every returned partition, and that results arrive via the shared-SD spec
 * rather than the partition-list composing spec.
 */
@Test
public void testPartitionProjectionExcludeParameters() throws Throwable {
    GetPartitionsRequest request = getGetPartitionsRequest();
    GetProjectionsSpec projection = request.getProjectionSpec();
    projection.setFieldList(Arrays.asList("dbName", "tableName", "catName", "parameters", "values"));
    // SQL-style LIKE pattern: drop any parameter whose key starts with the prefix.
    projection.setExcludeParamKeyPattern(EXCLUDE_KEY_PREFIX + "%");

    GetPartitionsResponse response = client.getPartitionsWithSpecs(request);
    PartitionSpec spec = response.getPartitionSpec().get(0);
    PartitionSpecWithSharedSD sharedSpec = spec.getSharedSDPartitionSpec();
    PartitionListComposingSpec composingSpec = spec.getPartitionList();
    Assert.assertNotNull("All the partitions should be returned in sharedSD spec", sharedSpec);
    Assert.assertNull("Partition list composing spec should be null", composingSpec);

    for (PartitionWithoutSD partition : sharedSpec.getPartitions()) {
        Map<String, String> params = partition.getParameters();
        Assert.assertFalse("excluded parameter key is found in the response", params.containsKey(EXCLUDE_KEY_PREFIX + "key1"));
        Assert.assertFalse("excluded parameter key is found in the response", params.containsKey(EXCLUDE_KEY_PREFIX + "key2"));
    }
}
Use of org.apache.hadoop.hive.metastore.api.GetPartitionsRequest in project hive by apache: class TestGetPartitionsUsingProjectionAndFilterSpecs, method testNestedMultiValuedFieldProjection.
/**
 * Verifies projection of nested multi-valued fields: requesting
 * {@code sd.cols.name} and {@code sd.cols.type} must populate exactly those
 * sub-fields of each column in the shared storage descriptor, while the
 * unrequested {@code sd.cols.comment} stays unset.
 */
@Test
public void testNestedMultiValuedFieldProjection() throws TException {
    GetPartitionsRequest request = getGetPartitionsRequest();
    request.getProjectionSpec().setFieldList(Arrays.asList("sd.cols.name", "sd.cols.type"));

    GetPartitionsResponse response = client.getPartitionsWithSpecs(request);
    PartitionSpecWithSharedSD sharedSpec = response.getPartitionSpec().get(0).getSharedSDPartitionSpec();
    StorageDescriptor sd = sharedSpec.getSd();
    Assert.assertNotNull("sd.cols were requested but was not returned", sd.getCols());

    for (FieldSchema column : sd.getCols()) {
        // Requested sub-fields must be present; unrequested ones must not.
        Assert.assertTrue("sd.cols.name was requested but was not returned", column.isSetName());
        Assert.assertTrue("sd.cols.type was requested but was not returned", column.isSetType());
        Assert.assertFalse("sd.cols.comment was not requested but was returned", column.isSetComment());
    }
}
Use of org.apache.hadoop.hive.metastore.api.GetPartitionsRequest in project hive by apache: class GetPartitionsRequestBuilder, method build.
/**
 * Assembles the {@link GetPartitionsRequest} from the values accumulated in
 * this builder. Required fields (db name, table name, projection and filter
 * specs) go through the constructor; the remaining optional fields are set
 * individually.
 *
 * @return a fully-populated request reflecting the builder's current state
 */
public GetPartitionsRequest build() {
    GetPartitionsRequest req = new GetPartitionsRequest(dbName, tblName, projectionSpec, filterSpec);
    req.setCatName(catName);
    req.setValidWriteIdList(validWriteIdList);
    // Authorization-related fields.
    req.setWithAuth(withAuth);
    req.setUser(user);
    req.setGroupNames(groupNames);
    // Client capability negotiation fields.
    req.setProcessorCapabilities(processorCapabilities);
    req.setProcessorIdentifier(processorIdentifier);
    return req;
}
Aggregations