Use of com.netflix.metacat.common.server.connectors.model.AuditInfo in project metacat by Netflix.
The class HiveConnectorFastPartitionService, method getpartitions.
private List<PartitionInfo> getpartitions(
    @Nonnull @NonNull final String databaseName,
    @Nonnull @NonNull final String tableName,
    @Nullable final List<String> partitionIds,
    final String filterExpression,
    final Sort sort,
    final Pageable pageable,
    final boolean includePartitionDetails) {
    final FilterPartition filter = new FilterPartition();
    // True when the filter expression references the batch id field.
    final boolean isBatched =
        !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
    final boolean hasDateCreated =
        !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
    // Handler for reading the result set.
    final ResultSetHandler<List<PartitionDetail>> handler = rs -> {
        final List<PartitionDetail> result = Lists.newArrayList();
        while (rs.next()) {
            final String name = rs.getString("name");
            final String uri = rs.getString("uri");
            final long createdDate = rs.getLong(FIELD_DATE_CREATED);
            Map<String, String> values = null;
            if (hasDateCreated) {
                values = Maps.newHashMap();
                values.put(FIELD_DATE_CREATED, String.valueOf(createdDate));
            }
            if (Strings.isNullOrEmpty(filterExpression)
                || filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
                final Long id = rs.getLong("id");
                final Long sdId = rs.getLong("sd_id");
                final Long serdeId = rs.getLong("serde_id");
                final String inputFormat = rs.getString("input_format");
                final String outputFormat = rs.getString("output_format");
                final String serializationLib = rs.getString("slib");
                final StorageInfo storageInfo = new StorageInfo();
                storageInfo.setUri(uri);
                storageInfo.setInputFormat(inputFormat);
                storageInfo.setOutputFormat(outputFormat);
                storageInfo.setSerializationLib(serializationLib);
                // Only a creation time is available, so it doubles as the
                // last-modified date in the audit info.
                final AuditInfo auditInfo = new AuditInfo();
                auditInfo.setCreatedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                auditInfo.setLastModifiedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                result.add(new PartitionDetail(id, sdId, serdeId,
                    PartitionInfo.builder()
                        .name(QualifiedName.ofPartition(catalogName, databaseName, tableName, name))
                        .auditInfo(auditInfo)
                        .serde(storageInfo)
                        .build()));
            }
        }
        return result;
    };
    final List<PartitionInfo> partitionInfos = new ArrayList<>();
    final List<PartitionDetail> partitions = getHandlerResults(
        databaseName, tableName, filterExpression, partitionIds, SQL_GET_PARTITIONS, handler, sort, pageable);
    if (includePartitionDetails && !partitions.isEmpty()) {
        final List<Long> partIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> sdIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> serdeIds = Lists.newArrayListWithCapacity(partitions.size());
        for (PartitionDetail partitionDetail : partitions) {
            partIds.add(partitionDetail.getId());
            sdIds.add(partitionDetail.getSdId());
            serdeIds.add(partitionDetail.getSerdeId());
        }
        // Fetch partition, storage-descriptor, and serde parameters in parallel.
        final List<ListenableFuture<Void>> futures = Lists.newArrayList();
        final Map<Long, Map<String, String>> partitionParams = Maps.newHashMap();
        futures.add(threadServiceManager.getExecutor().submit(
            () -> populateParameters(partIds, SQL_GET_PARTITION_PARAMS, "part_id", partitionParams)));
        final Map<Long, Map<String, String>> sdParams = Maps.newHashMap();
        if (!sdIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(
                () -> populateParameters(sdIds, SQL_GET_SD_PARAMS, "sd_id", sdParams)));
        }
        final Map<Long, Map<String, String>> serdeParams = Maps.newHashMap();
        if (!serdeIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(
                () -> populateParameters(serdeIds, SQL_GET_SERDE_PARAMS, "serde_id", serdeParams)));
        }
        try {
            // Wait (bounded to one hour) for all parameter lookups to complete.
            Futures.transform(Futures.successfulAsList(futures), Functions.constant(null)).get(1, TimeUnit.HOURS);
        } catch (Exception e) {
            throw Throwables.propagate(e);
        }
        for (PartitionDetail partitionDetail : partitions) {
            partitionDetail.getPartitionInfo().setMetadata(partitionParams.get(partitionDetail.getId()));
            partitionDetail.getPartitionInfo().getSerde().setParameters(sdParams.get(partitionDetail.getSdId()));
            partitionDetail.getPartitionInfo().getSerde()
                .setSerdeInfoParameters(serdeParams.get(partitionDetail.getSerdeId()));
        }
    }
    for (PartitionDetail partitionDetail : partitions) {
        partitionInfos.add(partitionDetail.getPartitionInfo());
    }
    return partitionInfos;
}
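When partition details are requested, the method fans the three parameter lookups out to an executor and joins them with a bounded wait. A minimal standalone sketch of that Guava fan-out/join pattern, with the populateParameters calls replaced by placeholder tasks (the executor setup and task bodies are illustrative, not metacat code):

import com.google.common.collect.Lists;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public final class ParallelLookupSketch {
    public static void main(final String[] args) throws Exception {
        // Stand-in for threadServiceManager.getExecutor().
        final ListeningExecutorService executor =
            MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
        final List<ListenableFuture<Void>> futures = Lists.newArrayList();
        futures.add(executor.submit(() -> {
            // e.g. populateParameters(partIds, SQL_GET_PARTITION_PARAMS, "part_id", partitionParams);
            return null;
        }));
        futures.add(executor.submit(() -> {
            // e.g. populateParameters(sdIds, SQL_GET_SD_PARAMS, "sd_id", sdParams);
            return null;
        }));
        // successfulAsList waits for every task (failed ones yield null entries);
        // the bounded get() mirrors the one-hour cap used in getpartitions.
        Futures.successfulAsList(futures).get(1, TimeUnit.HOURS);
        executor.shutdown();
    }
}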
Use of com.netflix.metacat.common.server.connectors.model.AuditInfo in project metacat by Netflix.
The class S3ConnectorInfoConverter, method toPartitionInfo.
@Override
public PartitionInfo toPartitionInfo(final TableInfo tableInfo, final Partition partition) {
    final QualifiedName tableName = tableInfo.getName();
    // Note: this reuses (and mutates) the table's StorageInfo, overriding only the URI.
    final StorageInfo storageInfo = tableInfo.getSerde();
    storageInfo.setUri(partition.getUri());
    final AuditInfo auditInfo = AuditInfo.builder()
        .createdDate(partition.getCreatedDate())
        .lastModifiedDate(partition.getLastUpdatedDate())
        .build();
    // The partition inherits the created-by/modified-by users from the table.
    final AuditInfo tableAuditInfo = tableInfo.getAudit();
    if (tableAuditInfo != null) {
        auditInfo.setCreatedBy(tableAuditInfo.getCreatedBy());
        auditInfo.setLastModifiedBy(tableAuditInfo.getLastModifiedBy());
    }
    return PartitionInfo.builder()
        .name(QualifiedName.ofPartition(
            tableName.getCatalogName(),
            tableName.getDatabaseName(),
            tableName.getTableName(),
            partition.getName()))
        .serde(storageInfo)
        .auditInfo(auditInfo)
        .build();
}
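The converter mixes AuditInfo's builder (for the dates) with its setters (for the user fields copied from the table's audit info). A minimal sketch of the same pattern, using only the AuditInfo methods that appear in these snippets; the Date and user values are placeholders:

import com.netflix.metacat.common.server.connectors.model.AuditInfo;
import java.util.Date;

public final class AuditInfoSketch {
    public static void main(final String[] args) {
        // Dates come from the builder, as in toPartitionInfo above.
        final AuditInfo partitionAudit = AuditInfo.builder()
            .createdDate(new Date())      // stands in for partition.getCreatedDate()
            .lastModifiedDate(new Date()) // stands in for partition.getLastUpdatedDate()
            .build();
        // The created-by/modified-by fields are then copied from the table's audit info.
        final AuditInfo tableAudit = new AuditInfo();
        tableAudit.setCreatedBy("table-owner");      // placeholder user
        tableAudit.setLastModifiedBy("table-owner"); // placeholder user
        partitionAudit.setCreatedBy(tableAudit.getCreatedBy());
        partitionAudit.setLastModifiedBy(tableAudit.getLastModifiedBy());
    }
}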
Use of com.netflix.metacat.common.server.connectors.model.AuditInfo in project metacat by Netflix.
The class HiveConnectorInfoConverter, method toTableInfo.
/**
 * Converts a connector table to a Metacat TableInfo.
 *
 * @param name  the qualified name of the table
 * @param table connector table
 * @return Metacat table info
 */
@Override
public TableInfo toTableInfo(final QualifiedName name, final Table table) {
    // Use a mutable fallback list: Collections.emptyList() is immutable and
    // would throw when the struct fields are added below.
    final List<FieldSchema> nonPartitionColumns =
        (table.getSd() != null) ? table.getSd().getCols() : Lists.newArrayList();
    // If the storage descriptor carries no columns, fall back to the table's
    // struct fields; ignore all exceptions while doing so.
    try {
        if (nonPartitionColumns.isEmpty()) {
            for (StructField field : HiveTableUtil.getTableStructFields(table)) {
                final FieldSchema fieldSchema = new FieldSchema(
                    field.getFieldName(),
                    field.getFieldObjectInspector().getTypeName(),
                    field.getFieldComment());
                nonPartitionColumns.add(fieldSchema);
            }
        }
    } catch (final Exception e) {
        log.error(e.getMessage(), e);
    }
    final List<FieldSchema> partitionColumns = table.getPartitionKeys();
    final Date creationDate = table.isSetCreateTime() ? epochSecondsToDate(table.getCreateTime()) : null;
    final List<FieldInfo> allFields =
        Lists.newArrayListWithCapacity(nonPartitionColumns.size() + partitionColumns.size());
    nonPartitionColumns.stream().map(field -> hiveToMetacatField(field, false)).forEachOrdered(allFields::add);
    partitionColumns.stream().map(field -> hiveToMetacatField(field, true)).forEachOrdered(allFields::add);
    final AuditInfo auditInfo = AuditInfo.builder().createdDate(creationDate).build();
    if (null != table.getTableType() && table.getTableType().equals(TableType.VIRTUAL_VIEW.name())) {
        return TableInfo.builder()
            .serde(toStorageInfo(table.getSd(), table.getOwner()))
            .fields(allFields)
            .metadata(table.getParameters())
            .name(name)
            .auditInfo(auditInfo)
            .view(ViewInfo.builder()
                .viewOriginalText(table.getViewOriginalText())
                .viewExpandedText(table.getViewExpandedText())
                .build())
            .build();
    } else {
        return TableInfo.builder()
            .serde(toStorageInfo(table.getSd(), table.getOwner()))
            .fields(allFields)
            .metadata(table.getParameters())
            .name(name)
            .auditInfo(auditInfo)
            .build();
    }
}
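epochSecondsToDate is a converter helper whose body is not shown in this section. Given the Instant.ofEpochSecond usage in the first snippet, a plausible implementation looks like the following; this is an assumption about its shape, not the verbatim metacat code:

import java.time.Instant;
import java.util.Date;

final class EpochSecondsSketch {
    // Assumption: Hive keeps createTime as seconds since the epoch,
    // and the helper simply lifts that value into a java.util.Date.
    static Date epochSecondsToDate(final long seconds) {
        return Date.from(Instant.ofEpochSecond(seconds));
    }
}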
Use of com.netflix.metacat.common.server.connectors.model.AuditInfo in project metacat by Netflix.
The class HiveConnectorInfoConverter, method fromPartitionInfo.
/**
 * Converts from a Metacat PartitionInfo to the connector partition.
 *
 * @param tableInfo Metacat table info
 * @param partition Metacat partition info
 * @return connector partition
 */
@Override
public Partition fromPartitionInfo(final TableInfo tableInfo, final PartitionInfo partition) {
    final QualifiedName name = partition.getName();
    final List<String> values = Lists.newArrayListWithCapacity(16);
    Map<String, String> metadata = partition.getMetadata();
    if (metadata == null) {
        // Can't use Collections.emptyMap(): it is immutable, and entries are
        // added to this map later by the embedded client.
        metadata = new HashMap<>();
    }
    final List<FieldInfo> fields = tableInfo.getFields();
    List<FieldSchema> fieldSchemas = Collections.emptyList();
    if (notNull(fields)) {
        fieldSchemas = fields.stream()
            .filter(field -> !field.isPartitionKey())
            .map(this::metacatToHiveField)
            .collect(Collectors.toList());
    }
    final StorageDescriptor sd = fromStorageInfo(partition.getSerde(), fieldSchemas);
    // Fall back to the table-level serialization lib when the partition has none.
    if (notNull(sd.getSerdeInfo()) && notNull(tableInfo.getSerde())
        && Strings.isNullOrEmpty(sd.getSerdeInfo().getSerializationLib())) {
        sd.getSerdeInfo().setSerializationLib(tableInfo.getSerde().getSerializationLib());
    }
    final AuditInfo auditInfo = partition.getAudit();
    final int createTime = (notNull(auditInfo) && notNull(auditInfo.getCreatedDate()))
        ? dateToEpochSeconds(auditInfo.getCreatedDate()) : 0;
    final int lastAccessTime = (notNull(auditInfo) && notNull(auditInfo.getLastModifiedDate()))
        ? dateToEpochSeconds(auditInfo.getLastModifiedDate()) : 0;
    if (null == name) {
        return new Partition(values, "", "", createTime, lastAccessTime, sd, metadata);
    }
    // Extract the partition values from a name like "dateint=20170101/hour=1".
    if (notNull(name.getPartitionName())) {
        for (String partialPartName : SLASH_SPLITTER.split(partition.getName().getPartitionName())) {
            final List<String> nameValues = ImmutableList.copyOf(EQUAL_SPLITTER.split(partialPartName));
            Preconditions.checkState(nameValues.size() == 2,
                "Unrecognized partition name: " + partition.getName());
            values.add(nameValues.get(1));
        }
    }
    final String databaseName = notNull(name.getDatabaseName()) ? name.getDatabaseName() : "";
    final String tableName = notNull(name.getTableName()) ? name.getTableName() : "";
    return new Partition(values, databaseName, tableName, createTime, lastAccessTime, sd, metadata);
}
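The loop above expects Hive-style partition names such as dateint=20170101/hour=1. A standalone sketch of that parsing with Guava splitters; the two splitter definitions are assumptions about what SLASH_SPLITTER and EQUAL_SPLITTER look like in metacat:

import com.google.common.base.Preconditions;
import com.google.common.base.Splitter;
import com.google.common.collect.ImmutableList;
import java.util.ArrayList;
import java.util.List;

public final class PartitionNameSketch {
    // Assumed definitions of the splitter constants used above.
    private static final Splitter SLASH_SPLITTER = Splitter.on('/');
    private static final Splitter EQUAL_SPLITTER = Splitter.on('=');

    public static void main(final String[] args) {
        final List<String> values = new ArrayList<>();
        for (final String part : SLASH_SPLITTER.split("dateint=20170101/hour=1")) {
            final List<String> nameValues = ImmutableList.copyOf(EQUAL_SPLITTER.split(part));
            Preconditions.checkState(nameValues.size() == 2, "Unrecognized partition name: " + part);
            // Keep only the value, as fromPartitionInfo does.
            values.add(nameValues.get(1));
        }
        System.out.println(values); // prints [20170101, 1]
    }
}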
Use of com.netflix.metacat.common.server.connectors.model.AuditInfo in project metacat by Netflix.
The class HiveConnectorInfoConverter, method toPartitionInfo.
/**
 * Converts a connector partition to a Metacat PartitionInfo.
 *
 * @param tableInfo Metacat table info for the partition's table
 * @param partition connector partition
 * @return Metacat partition info
 */
@Override
public PartitionInfo toPartitionInfo(final TableInfo tableInfo, final Partition partition) {
    final QualifiedName tableName = tableInfo.getName();
    final QualifiedName partitionName = QualifiedName.ofPartition(
        tableName.getCatalogName(),
        tableName.getDatabaseName(),
        tableName.getTableName(),
        getNameFromPartVals(tableInfo, partition.getValues()));
    final String owner = notNull(tableInfo.getSerde()) ? tableInfo.getSerde().getOwner() : "";
    final AuditInfo auditInfo = AuditInfo.builder()
        .createdDate(epochSecondsToDate(partition.getCreateTime()))
        .lastModifiedDate(epochSecondsToDate(partition.getLastAccessTime()))
        .build();
    return PartitionInfo.builder()
        .serde(toStorageInfo(partition.getSd(), owner))
        .name(partitionName)
        .auditInfo(auditInfo)
        .metadata(partition.getParameters())
        .build();
}
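Every converter in this section builds the partition's qualified name the same way. A hedged usage sketch with placeholder catalog/database/table/partition names, assuming QualifiedName lives in com.netflix.metacat.common as in the metacat codebase:

import com.netflix.metacat.common.QualifiedName;

public final class QualifiedNameSketch {
    public static void main(final String[] args) {
        // Placeholder names; real callers pass values from TableInfo and the partition.
        final QualifiedName partitionName = QualifiedName.ofPartition(
            "prodhive", "mydb", "mytable", "dateint=20170101/hour=1");
        System.out.println(partitionName); // e.g. prodhive/mydb/mytable/dateint=20170101/hour=1
    }
}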