Use of org.apache.hadoop.hive.metastore.api.Partition in project metacat by Netflix.
The class HiveConnectorPartitionService, method getPartitionsByNames.
protected Map<String, PartitionHolder> getPartitionsByNames(final Table table,
                                                            final List<String> partitionNames) {
    final String databasename = table.getDbName();
    final String tablename = table.getTableName();
    try {
        final List<Partition> partitions =
            metacatHiveClient.getPartitions(databasename, tablename, partitionNames);
        return partitions.stream().map(PartitionHolder::new).collect(Collectors.toMap(part -> {
            try {
                return Warehouse.makePartName(table.getPartitionKeys(), part.getPartition().getValues());
            } catch (Exception e) {
                throw new InvalidMetaException("One or more partition names are invalid.", e);
            }
        }, Function.identity()));
    } catch (Exception e) {
        throw new InvalidMetaException("One or more partition names are invalid.", e);
    }
}
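The map keys above are built by Hive's Warehouse.makePartName, which joins the table's partition keys with the partition's values into the canonical key=value name. A minimal, runnable sketch of that behavior (the "dateint" and "hour" columns are made up for illustration):

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.metastore.Warehouse;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;

public class PartNameSketch {
    public static void main(final String[] args) throws MetaException {
        final List<FieldSchema> partitionKeys = Arrays.asList(
            new FieldSchema("dateint", "int", null),
            new FieldSchema("hour", "string", null));
        // Joins keys and values pairwise: prints "dateint=20240101/hour=00"
        System.out.println(Warehouse.makePartName(partitionKeys, Arrays.asList("20240101", "00")));
    }
}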
Use of org.apache.hadoop.hive.metastore.api.Partition in project metacat by Netflix.
The class HiveConnectorPartitionService, method getPartitionKeys.
/**
 * {@inheritDoc}.
 */
@Override
public List<String> getPartitionKeys(final ConnectorRequestContext requestContext,
                                     final QualifiedName tableName,
                                     final PartitionListRequest partitionsRequest) {
    final String filterExpression = partitionsRequest.getFilter();
    final List<String> partitionIds = partitionsRequest.getPartitionNames();
    List<String> names = Lists.newArrayList();
    final Pageable pageable = partitionsRequest.getPageable();
    try {
        if (filterExpression != null || (partitionIds != null && !partitionIds.isEmpty())) {
            final Table table =
                metacatHiveClient.getTableByName(tableName.getDatabaseName(), tableName.getTableName());
            for (Partition partition : getPartitions(tableName, filterExpression, partitionIds,
                    partitionsRequest.getSort(), pageable)) {
                names.add(getNameOfPartition(table, partition));
            }
        } else {
            names = metacatHiveClient.getPartitionNames(tableName.getDatabaseName(), tableName.getTableName());
            return ConnectorUtils.paginate(names, pageable);
        }
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(tableName, exception);
    } catch (MetaException | InvalidObjectException e) {
        throw new InvalidMetaException("Invalid metadata for " + tableName, e);
    } catch (TException e) {
        throw new ConnectorException(String.format("Failed to get partition keys for hive table %s", tableName), e);
    }
    return names;
}
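In the unfiltered branch, ConnectorUtils.paginate trims the full list of names to the requested page. A hedged sketch of that offset/limit behavior, assuming Pageable carries a nullable offset and limit (the real Metacat utility may differ in its details):

import java.util.List;
import java.util.stream.Collectors;

final class PaginateSketch {
    // Hypothetical stand-in for ConnectorUtils.paginate: no paging info means
    // "return everything"; otherwise skip to the offset and cap at the limit.
    static List<String> paginate(final List<String> names, final Integer offset, final Integer limit) {
        if (offset == null || limit == null) {
            return names;
        }
        return names.stream().skip(offset).limit(limit).collect(Collectors.toList());
    }
}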
Use of org.apache.hadoop.hive.metastore.api.Partition in project metacat by Netflix.
The class HiveConnectorPartitionService, method getPartitions.
/**
 * {@inheritDoc}.
 */
@Override
public List<PartitionInfo> getPartitions(final ConnectorRequestContext requestContext,
                                         final QualifiedName tableName,
                                         final PartitionListRequest partitionsRequest) {
    try {
        final List<Partition> partitions =
            getPartitions(tableName, partitionsRequest.getFilter(), partitionsRequest.getPartitionNames(),
                partitionsRequest.getSort(), partitionsRequest.getPageable());
        final Table table =
            metacatHiveClient.getTableByName(tableName.getDatabaseName(), tableName.getTableName());
        final TableInfo tableInfo = hiveMetacatConverters.toTableInfo(tableName, table);
        final List<PartitionInfo> partitionInfos = new ArrayList<>();
        for (Partition partition : partitions) {
            partitionInfos.add(hiveMetacatConverters.toPartitionInfo(tableInfo, partition));
        }
        return partitionInfos;
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(tableName, exception);
    } catch (MetaException | InvalidObjectException e) {
        throw new InvalidMetaException("Invalid metadata for " + tableName, e);
    } catch (TException e) {
        throw new ConnectorException(String.format("Failed to get partitions for hive table %s", tableName), e);
    }
}
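A hypothetical caller sketch, assuming the filter string follows Hive's partition-filter syntax. QualifiedName.ofTable is Metacat's factory for table names; the PartitionListRequest wiring and the "dateint" column below are assumptions made purely for illustration:

// Fetch all partitions of mydb.mytable for January 2024 (hypothetical names).
final QualifiedName table = QualifiedName.ofTable("prodhive", "mydb", "mytable");
final PartitionListRequest request = new PartitionListRequest();
request.setFilter("dateint >= 20240101 and dateint < 20240201");
final List<PartitionInfo> januaryPartitions =
    partitionService.getPartitions(requestContext, table, request);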
Use of org.apache.hadoop.hive.metastore.api.Partition in project metacat by Netflix.
The class HiveConnectorInfoConverter, method fromPartitionInfo.
/**
 * Converts from PartitionInfo to the connector (Hive) partition.
 *
 * @param tableInfo table info
 * @param partition Metacat partition info
 * @return connector partition
 */
@Override
public Partition fromPartitionInfo(final TableInfo tableInfo, final PartitionInfo partition) {
    final QualifiedName name = partition.getName();
    final List<String> values = Lists.newArrayListWithCapacity(16);
    Map<String, String> metadata = partition.getMetadata();
    if (metadata == null) {
        // Can't use Collections.emptyMap(): it is immutable, and the embedded
        // metastore may add entries to the partition parameters.
        metadata = new HashMap<>();
    }
    final List<FieldInfo> fields = tableInfo.getFields();
    List<FieldSchema> fieldSchemas = Collections.emptyList();
    if (notNull(fields)) {
        fieldSchemas = fields.stream()
            .filter(field -> !field.isPartitionKey())
            .map(this::metacatToHiveField)
            .collect(Collectors.toList());
    }
    final StorageDescriptor sd = fromStorageInfo(partition.getSerde(), fieldSchemas);
    // Fall back to the table-level serialization lib when the partition does not set one.
    if (notNull(sd.getSerdeInfo()) && notNull(tableInfo.getSerde())
        && Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())) {
        sd.getSerdeInfo().setSerializationLib(tableInfo.getSerde().getSerializationLib());
    }
    final AuditInfo auditInfo = partition.getAudit();
    final int createTime = (notNull(auditInfo) && notNull(auditInfo.getCreatedDate()))
        ? dateToEpochSeconds(auditInfo.getCreatedDate()) : 0;
    final int lastAccessTime = (notNull(auditInfo) && notNull(auditInfo.getLastModifiedDate()))
        ? dateToEpochSeconds(auditInfo.getLastModifiedDate()) : 0;
    if (null == name) {
        return new Partition(values, "", "", createTime, lastAccessTime, sd, metadata);
    }
    if (notNull(name.getPartitionName())) {
        for (String partialPartName : SLASH_SPLITTER.split(partition.getName().getPartitionName())) {
            final List<String> nameValues = ImmutableList.copyOf(EQUAL_SPLITTER.split(partialPartName));
            Preconditions.checkState(nameValues.size() == 2,
                "Unrecognized partition name: " + partition.getName());
            values.add(nameValues.get(1));
        }
    }
    final String databaseName = notNull(name.getDatabaseName()) ? name.getDatabaseName() : "";
    final String tableName = notNull(name.getTableName()) ? name.getTableName() : "";
    return new Partition(values, databaseName, tableName, createTime, lastAccessTime, sd, metadata);
}
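The name-parsing loop above walks a canonical partition name such as "dateint=20240101/hour=00" and keeps only the values. A runnable sketch, assuming SLASH_SPLITTER and EQUAL_SPLITTER are Guava splitters on '/' and '=' (the limit of 2 matches the checkState on exactly two tokens):

import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import java.util.ArrayList;
import java.util.List;

public class PartitionNameParseSketch {
    // Assumed definitions; the real constants live in HiveConnectorInfoConverter.
    private static final Splitter SLASH_SPLITTER = Splitter.on('/');
    private static final Splitter EQUAL_SPLITTER = Splitter.on('=').limit(2);

    public static void main(final String[] args) {
        final List<String> values = new ArrayList<>();
        for (String part : SLASH_SPLITTER.split("dateint=20240101/hour=00")) {
            final List<String> nameValue = ImmutableList.copyOf(EQUAL_SPLITTER.split(part));
            values.add(nameValue.get(1)); // keep the value, drop the key
        }
        System.out.println(values); // prints [20240101, 00]
    }
}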
Use of org.apache.hadoop.hive.metastore.api.Partition in project metacat by Netflix.
The class MetacatHMSHandler, method add_drop_partitions.
/**
 * Adds and drops partitions in one transaction.
 *
 * @param databaseName database name
 * @param tableName    table name
 * @param addParts     list of partitions to add
 * @param dropParts    list of partition values identifying the partitions to drop
 * @param deleteData   if true, deletes the data
 * @return true if successful
 * @throws NoSuchObjectException Exception if the table does not exist
 * @throws MetaException         Exception on an internal metastore error
 * @throws TException            any internal exception
 */
@SuppressWarnings({"checkstyle:methodname"})
public boolean add_drop_partitions(final String databaseName, final String tableName,
                                   final List<Partition> addParts, final List<List<String>> dropParts,
                                   final boolean deleteData)
    throws NoSuchObjectException, MetaException, TException {
    startFunction("add_drop_partitions : db=" + databaseName + " tbl=" + tableName);
    if (addParts.isEmpty() && dropParts.isEmpty()) {
        return true;
    }
    for (List<String> partVals : dropParts) {
        LOG.info("Drop Partition values: " + partVals);
    }
    for (Partition part : addParts) {
        LOG.info("Add Partition values: " + part);
    }
    boolean ret = false;
    Exception ex = null;
    try {
        ret = addDropPartitionsCore(getMS(), databaseName, tableName, addParts, dropParts, false, null);
    } catch (Exception e) {
        ex = e;
        // Rethrow known metastore exception types as-is; wrap anything else.
        if (e instanceof MetaException) {
            throw (MetaException) e;
        } else if (e instanceof InvalidObjectException) {
            throw (InvalidObjectException) e;
        } else if (e instanceof AlreadyExistsException) {
            throw (AlreadyExistsException) e;
        } else if (e instanceof NoSuchObjectException) {
            throw (NoSuchObjectException) e;
        } else {
            throw newMetaException(e);
        }
    } finally {
        endFunction("drop_partitions", ret, ex, tableName);
    }
    return ret;
}
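A hypothetical invocation sketch: replace a partition atomically by adding its new incarnation and dropping the old one (identified by its values) in the same call. The database and table names are made up, and the MetacatHMSHandler import is omitted since its package is not shown above:

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;

final class AddDropSketch {
    static boolean swapPartition(final MetacatHMSHandler handler, final Partition newPart,
                                 final List<String> oldPartValues) throws TException {
        return handler.add_drop_partitions("mydb", "mytable",
            Collections.singletonList(newPart),
            Collections.singletonList(oldPartValues),
            false); // deleteData flag
    }
}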