Use of com.netflix.metacat.common.server.connectors.ConnectorRequestContext in project metacat by Netflix.
The class HiveConnectorPartitionService, method savePartitions.
/**
 * By default (checkIfExists=true and alterIfExists=false), this method adds the provided list of partitions.
 * If a partition already exists, it is dropped first before being added.
 * If checkIfExists=false, the method adds the partitions to the table as-is; if a partition already exists,
 * an AlreadyExistsException is thrown.
 * If alterIfExists=true, the method updates existing partitions and adds non-existent ones.
 * If a partition in the provided list has all its details, they are used as-is; missing details,
 * mostly storage information, are inherited from the table.
 */
@Override
public PartitionsSaveResponse savePartitions(final ConnectorRequestContext requestContext,
                                             final QualifiedName tableQName,
                                             final PartitionsSaveRequest partitionsSaveRequest) {
    final String databaseName = tableQName.getDatabaseName();
    final String tableName = tableQName.getTableName();
    final Table table;
    try {
        table = metacatHiveClient.getTableByName(databaseName, tableName);
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(tableQName, exception);
    } catch (TException e) {
        throw new ConnectorException(String.format("Failed getting hive table %s", tableQName), e);
    }
    // New partitions
    final List<PartitionInfo> addedPartitionInfos = Lists.newArrayList();
    final List<PartitionInfo> partitionInfos = partitionsSaveRequest.getPartitions();
    final List<String> partitionNames = partitionInfos.stream().map(part -> {
        final String partitionName = part.getName().getPartitionName();
        PartitionUtil.validatePartitionName(partitionName, getPartitionKeys(table.getPartitionKeys()));
        return partitionName;
    }).collect(Collectors.toList());
    // New partition names
    final List<String> addedPartitionNames = Lists.newArrayList();
    // Updated partition names
    final List<String> existingPartitionNames = Lists.newArrayList();
    // Existing partitions
    final List<PartitionHolder> existingPartitionHolders = Lists.newArrayList();
    // Existing partition map
    Map<String, PartitionHolder> existingPartitionMap = Collections.emptyMap();
    if (partitionsSaveRequest.getCheckIfExists() || partitionsSaveRequest.getAlterIfExists()) {
        existingPartitionMap = getPartitionsByNames(table, partitionNames);
    }
    for (PartitionInfo partitionInfo : partitionInfos) {
        final String partitionName = partitionInfo.getName().getPartitionName();
        final PartitionHolder existingPartitionHolder = existingPartitionMap.get(partitionName);
        if (existingPartitionHolder == null) {
            addedPartitionNames.add(partitionName);
            addedPartitionInfos.add(partitionInfo);
        } else {
            final String partitionUri =
                partitionInfo.getSerde() != null ? partitionInfo.getSerde().getUri() : null;
            final String existingPartitionUri = getPartitionUri(existingPartitionHolder);
            if (partitionUri == null || !partitionUri.equals(existingPartitionUri)) {
                existingPartitionNames.add(partitionName);
                // Copy the missing details (URI, parameters, audit) over from the existing partition
                if (partitionInfo.getSerde() == null) {
                    partitionInfo.setSerde(new StorageInfo());
                }
                if (partitionInfo.getAudit() == null) {
                    partitionInfo.setAudit(new AuditInfo());
                }
                if (StringUtils.isBlank(partitionUri)) {
                    partitionInfo.getSerde().setUri(existingPartitionUri);
                }
                // If alterIfExists is set, update the existing partition in place;
                // otherwise treat it as an add (it will be dropped and re-added below).
                if (partitionsSaveRequest.getAlterIfExists()) {
                    if (existingPartitionHolder.getPartition() != null) {
                        final Partition existingPartition = existingPartitionHolder.getPartition();
                        partitionInfo.getSerde().setParameters(existingPartition.getParameters());
                        partitionInfo.getAudit().setCreatedDate(
                            HiveConnectorInfoConverter.epochSecondsToDate(existingPartition.getCreateTime()));
                        partitionInfo.getAudit().setLastModifiedDate(
                            HiveConnectorInfoConverter.epochSecondsToDate(existingPartition.getLastAccessTime()));
                    } else {
                        final PartitionInfo existingPartitionInfo = existingPartitionHolder.getPartitionInfo();
                        if (existingPartitionInfo.getSerde() != null) {
                            partitionInfo.getSerde().setParameters(existingPartitionInfo.getSerde().getParameters());
                        }
                        if (existingPartitionInfo.getAudit() != null) {
                            partitionInfo.getAudit().setCreatedDate(existingPartitionInfo.getAudit().getCreatedDate());
                            partitionInfo.getAudit().setLastModifiedDate(
                                existingPartitionInfo.getAudit().getLastModifiedDate());
                        }
                    }
                    existingPartitionHolder.setPartitionInfo(partitionInfo);
                    existingPartitionHolders.add(existingPartitionHolder);
                } else {
                    addedPartitionInfos.add(partitionInfo);
                }
            }
        }
    }
    final Set<String> deletePartitionNames = Sets.newHashSet();
    if (!partitionsSaveRequest.getAlterIfExists()) {
        deletePartitionNames.addAll(existingPartitionNames);
    }
    if (partitionsSaveRequest.getPartitionIdsForDeletes() != null) {
        deletePartitionNames.addAll(partitionsSaveRequest.getPartitionIdsForDeletes());
    }
    addUpdateDropPartitions(tableQName, table, partitionNames, addedPartitionInfos,
        existingPartitionHolders, deletePartitionNames);
    final PartitionsSaveResponse result = new PartitionsSaveResponse();
    result.setAdded(addedPartitionNames);
    result.setUpdated(existingPartitionNames);
    return result;
}
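For context, a minimal sketch (not from the metacat source) of how a caller might drive the save modes described in the Javadoc above. partitionService and requestContext are assumed to be in scope; QualifiedName.ofTable, PartitionInfo.setName, and the PartitionsSaveRequest setters are assumed counterparts of the factories and getters used in the snippet.
// Hedged usage sketch: save one partition under the default drop-and-re-add mode.
final QualifiedName tableQName = QualifiedName.ofTable("hive_catalog", "db", "tbl"); // hypothetical names
final PartitionInfo partition = new PartitionInfo();
partition.setName(QualifiedName.ofPartition("hive_catalog", "db", "tbl", "dateint=20240101"));

final PartitionsSaveRequest request = new PartitionsSaveRequest();
request.setPartitions(Lists.newArrayList(partition));
request.setCheckIfExists(true);   // default mode: existing partitions are dropped and re-added
request.setAlterIfExists(false);  // set to true to update existing partitions in place instead

final PartitionsSaveResponse response =
    partitionService.savePartitions(requestContext, tableQName, request);
// response.getAdded() / response.getUpdated() report which names fell into each bucket.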
Use of com.netflix.metacat.common.server.connectors.ConnectorRequestContext in project metacat by Netflix.
The class DirectSqlGetPartition, method getPartitionCount.
/**
 * Returns the number of partitions for the given table.
 *
 * @param requestContext request context
 * @param tableName      qualified table name
 * @return number of partitions
 */
@Transactional(readOnly = true)
public int getPartitionCount(final ConnectorRequestContext requestContext, final QualifiedName tableName) {
    final long start = registry.clock().wallTime();
    // Handler for reading the result set
    final ResultSetExtractor<Integer> handler = rs -> {
        int count = 0;
        while (rs.next()) {
            count = rs.getInt("count");
        }
        return count;
    };
    try {
        final Optional<QualifiedName> sourceTable =
            getSourceTableName(tableName.getDatabaseName(), tableName.getTableName(), false);
        return sourceTable
            .map(qualifiedName -> jdbcTemplate.query(
                SQL.SQL_GET_AUDIT_TABLE_PARTITION_COUNT,
                new String[] {
                    tableName.getDatabaseName(), tableName.getTableName(),
                    qualifiedName.getDatabaseName(), qualifiedName.getTableName()
                },
                new int[] {Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR},
                handler))
            .orElseGet(() -> jdbcTemplate.query(
                SQL.SQL_GET_PARTITION_COUNT,
                new String[] {tableName.getDatabaseName(), tableName.getTableName()},
                new int[] {Types.VARCHAR, Types.VARCHAR},
                handler));
    } catch (Exception e) {
        throw new ConnectorException("Failed getting the partition count", e);
    } finally {
        this.fastServiceMetric.recordTimer(
            HiveMetrics.TagGetPartitionCount.getMetricName(), registry.clock().wallTime() - start);
    }
}
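For context, a hedged usage sketch. QualifiedName.ofTable is assumed by analogy with the ofDatabase/ofPartition factories seen elsewhere on this page; directSqlGetPartition and requestContext are hypothetical names.
// Counts partitions via the audit-table query when the table has a source table,
// otherwise via the plain partition-count query (mirrors the map/orElseGet above).
final QualifiedName table = QualifiedName.ofTable("hive_catalog", "db", "tbl");
final int partitionCount = directSqlGetPartition.getPartitionCount(requestContext, table);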
Use of com.netflix.metacat.common.server.connectors.ConnectorRequestContext in project metacat by Netflix.
The class DirectSqlGetPartition, method getPartitionNames.
/**
 * Returns the partition names matching the given URIs.
 *
 * @param context      request context
 * @param uris         partition location URIs
 * @param prefixSearch if true, URIs are matched as prefixes
 * @return map of URI to matching partition names
 */
@Transactional(readOnly = true)
public Map<String, List<QualifiedName>> getPartitionNames(@Nonnull final ConnectorRequestContext context,
                                                          @Nonnull final List<String> uris,
                                                          final boolean prefixSearch) {
    final long start = registry.clock().wallTime();
    final Map<String, List<QualifiedName>> result = Maps.newHashMap();
    // Create the sql
    final StringBuilder queryBuilder = new StringBuilder(SQL.SQL_GET_PARTITION_NAMES_BY_URI);
    final List<SqlParameterValue> params = Lists.newArrayList();
    if (prefixSearch) {
        queryBuilder.append(" 1=2");
        uris.forEach(uri -> {
            queryBuilder.append(" or location like ?");
            params.add(new SqlParameterValue(Types.VARCHAR, uri + "%"));
        });
    } else {
        queryBuilder.append(" location in (");
        Joiner.on(',').appendTo(queryBuilder, uris.stream().map(uri -> "?").collect(Collectors.toList()));
        queryBuilder.append(")");
        params.addAll(uris.stream().map(uri -> new SqlParameterValue(Types.VARCHAR, uri)).collect(Collectors.toList()));
    }
    final ResultSetExtractor<Map<String, List<QualifiedName>>> handler = rs -> {
        while (rs.next()) {
            final String schemaName = rs.getString("schema_name");
            final String tableName = rs.getString("table_name");
            final String partitionName = rs.getString("partition_name");
            final String uri = rs.getString("location");
            final List<QualifiedName> partitionNames = result.get(uri);
            final QualifiedName qualifiedName =
                QualifiedName.ofPartition(catalogName, schemaName, tableName, partitionName);
            if (partitionNames == null) {
                result.put(uri, Lists.newArrayList(qualifiedName));
            } else {
                partitionNames.add(qualifiedName);
            }
        }
        return result;
    };
    try {
        jdbcTemplate.query(queryBuilder.toString(), params.toArray(), handler);
    } finally {
        this.fastServiceMetric.recordTimer(
            HiveMetrics.TagGetPartitionNames.getMetricName(), registry.clock().wallTime() - start);
    }
    return result;
}
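For context, a hedged sketch of the two lookup modes: exact location match versus prefix match (useful for finding every partition under a warehouse path). directSqlGetPartition and context are assumed to be in scope, and the S3 paths are hypothetical.
// Exact match: generates "location in (?)".
final List<String> uris = Lists.newArrayList("s3://bucket/warehouse/db.db/tbl/dateint=20240101");
final Map<String, List<QualifiedName>> exact =
    directSqlGetPartition.getPartitionNames(context, uris, false);
// Prefix match: generates "location like 's3://bucket/warehouse/db.db/tbl%'".
final Map<String, List<QualifiedName>> byPrefix =
    directSqlGetPartition.getPartitionNames(
        context, Lists.newArrayList("s3://bucket/warehouse/db.db/tbl"), true);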
Use of com.netflix.metacat.common.server.connectors.ConnectorRequestContext in project metacat by Netflix.
The class HiveConnectorDatabaseService, method list.
/**
* {@inheritDoc}.
*/
@Override
public List<DatabaseInfo> list(final ConnectorRequestContext requestContext,
                               final QualifiedName name,
                               @Nullable final QualifiedName prefix,
                               @Nullable final Sort sort,
                               @Nullable final Pageable pageable) {
    try {
        final List<DatabaseInfo> databaseInfos = Lists.newArrayList();
        for (String databaseName : metacatHiveClient.getAllDatabases()) {
            final QualifiedName qualifiedName = QualifiedName.ofDatabase(name.getCatalogName(), databaseName);
            if (prefix != null && !qualifiedName.toString().startsWith(prefix.toString())) {
                continue;
            }
            databaseInfos.add(DatabaseInfo.builder().name(qualifiedName).build());
        }
        // Only sorting by name is supported
        if (sort != null) {
            ConnectorUtils.sort(databaseInfos, sort, Comparator.comparing(p -> p.getName().getDatabaseName()));
        }
        return ConnectorUtils.paginate(databaseInfos, pageable);
    } catch (MetaException exception) {
        throw new InvalidMetaException(name, exception);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed listing hive databases in %s", name), exception);
    }
}
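For context, a hedged usage sketch of listing databases filtered by prefix. QualifiedName.ofCatalog is an assumption by analogy with the ofDatabase factory above; databaseService and requestContext are hypothetical names.
// List databases whose qualified name starts with "hive_catalog/prod".
final QualifiedName catalog = QualifiedName.ofCatalog("hive_catalog"); // ofCatalog assumed
final QualifiedName prefix = QualifiedName.ofDatabase("hive_catalog", "prod");
final List<DatabaseInfo> databases = databaseService.list(requestContext, catalog, prefix, null, null);
// Passing null for sort and pageable returns all matches unsorted and unpaginated;
// only sorting by database name is supported.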
Use of com.netflix.metacat.common.server.connectors.ConnectorRequestContext in project metacat by Netflix.
The class PolarisConnectorTableService, method list.
/**
* {@inheritDoc}.
*/
@Override
public List<TableInfo> list(final ConnectorRequestContext requestContext,
                            final QualifiedName name,
                            @Nullable final QualifiedName prefix,
                            @Nullable final Sort sort,
                            @Nullable final Pageable pageable) {
    try {
        final String tableFilter = (prefix != null && prefix.isTableDefinition()) ? prefix.getTableName() : "";
        final List<PolarisTableEntity> tbls =
            polarisStoreService.getTableEntities(name.getDatabaseName(), tableFilter);
        if (sort != null) {
            ConnectorUtils.sort(tbls, sort, Comparator.comparing(PolarisTableEntity::getTblName));
        }
        return ConnectorUtils.paginate(tbls, pageable).stream()
            .map(polarisTableMapper::toInfo)
            .collect(Collectors.toList());
    } catch (Exception exception) {
        final String msg = String.format("Failed polaris list tables %s using prefix %s", name, prefix);
        log.error(msg, exception);
        throw new ConnectorException(msg, exception);
    }
}
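Similarly for Polaris, a hedged sketch: listing tables under a database, optionally narrowed by a table-name prefix. QualifiedName.ofTable is an assumption, and tableService/requestContext are hypothetical names. Note the prefix only narrows results when it is a table-level name (see isTableDefinition() above).
final QualifiedName db = QualifiedName.ofDatabase("polaris_catalog", "db");
final QualifiedName prefix = QualifiedName.ofTable("polaris_catalog", "db", "events"); // ofTable assumed
final List<TableInfo> tables = tableService.list(requestContext, db, prefix, null, null);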