use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
the class HiveConnectorFastTableService method getTableNames.
@Override
public Map<String, List<QualifiedName>> getTableNames(@Nonnull final ConnectorContext context,
    @Nonnull final List<String> uris, final boolean prefixSearch) {
    final long start = registry.clock().monotonicTime();
    final Map<String, String> tags = new HashMap<String, String>();
    tags.put("request", HiveMetrics.getTableNames.name());
    final Map<String, List<QualifiedName>> result = Maps.newHashMap();
    // Get data source
    final DataSource dataSource = DataSourceManager.get().get(catalogName);
    // Create the sql
    final StringBuilder queryBuilder = new StringBuilder(SQL_GET_TABLE_NAMES_BY_URI);
    final List<String> params = Lists.newArrayList();
    if (prefixSearch) {
        queryBuilder.append(" and (1=0");
        uris.forEach(uri -> {
            queryBuilder.append(" or location like ?");
            params.add(uri + "%");
        });
        queryBuilder.append(" )");
    } else {
        queryBuilder.append(" and location in (");
        uris.forEach(uri -> {
            queryBuilder.append("?,");
            params.add(uri);
        });
        queryBuilder.deleteCharAt(queryBuilder.length() - 1).append(")");
    }
    // Handler for reading the result set
    ResultSetHandler<Map<String, List<QualifiedName>>> handler = rs -> {
        while (rs.next()) {
            final String schemaName = rs.getString("schema_name");
            final String tableName = rs.getString("table_name");
            final String uri = rs.getString("location");
            List<QualifiedName> names = result.get(uri);
            if (names == null) {
                names = Lists.newArrayList();
                result.put(uri, names);
            }
            names.add(QualifiedName.ofTable(catalogName, schemaName, tableName));
        }
        return result;
    };
    try (Connection conn = dataSource.getConnection()) {
        new QueryRunner().query(conn, queryBuilder.toString(), handler, params.toArray());
    } catch (SQLException e) {
        throw Throwables.propagate(e);
    } finally {
        final long duration = registry.clock().monotonicTime() - start;
        log.debug("### Time taken to complete getTableNames is {} ms", duration);
        this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
    }
    return result;
}
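To illustrate the dynamic-SQL construction above in isolation, here is a minimal, self-contained sketch that builds the same kind of parameterized WHERE clause for a list of URIs. The base SQL string, table, and column names are illustrative placeholders, not the actual SQL_GET_TABLE_NAMES_BY_URI constant from metacat.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public final class UriQueryBuilderSketch {
    // Placeholder base query; the real SQL_GET_TABLE_NAMES_BY_URI constant may differ.
    private static final String BASE_SQL =
        "select d.name schema_name, t.tbl_name table_name, s.location"
            + " from TBLS t join DBS d on t.db_id = d.db_id join SDS s on t.sd_id = s.sd_id where 1=1";

    static String buildQuery(final List<String> uris, final boolean prefixSearch, final List<String> params) {
        final StringBuilder queryBuilder = new StringBuilder(BASE_SQL);
        if (prefixSearch) {
            // One "or location like ?" per URI, guarded by a neutral 1=0 so the ORs compose cleanly
            queryBuilder.append(" and (1=0");
            uris.forEach(uri -> {
                queryBuilder.append(" or location like ?");
                params.add(uri + "%");
            });
            queryBuilder.append(" )");
        } else {
            // Exact match: "location in (?, ?, ...)"
            queryBuilder.append(" and location in (");
            uris.forEach(uri -> {
                queryBuilder.append("?,");
                params.add(uri);
            });
            queryBuilder.deleteCharAt(queryBuilder.length() - 1).append(")");
        }
        return queryBuilder.toString();
    }

    public static void main(final String[] args) {
        final List<String> params = new ArrayList<>();
        final String sql = buildQuery(
            Arrays.asList("s3://bucket/warehouse/db1", "s3://bucket/warehouse/db2"), true, params);
        System.out.println(sql);
        System.out.println(params);
    }
}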
use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
the class MViewServiceImpl method list.
@Override
public List<NameDateDto> list(@Nonnull final QualifiedName name) {
    final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
    final QualifiedName viewDbName = QualifiedName.ofDatabase(name.getCatalogName(), VIEW_DB_NAME);
    final ConnectorTableService service = connectorManager.getTableService(name.getCatalogName());
    List<QualifiedName> tableNames = Lists.newArrayList();
    try {
        final ConnectorContext connectorContext = converterUtil.toConnectorContext(metacatRequestContext);
        tableNames = service.listNames(connectorContext, viewDbName, null, null, null);
    } catch (Exception ignored) {
        // ignore. Return an empty list if database 'franklinviews' does not exist
    }
    if (!name.isDatabaseDefinition() && name.isCatalogDefinition()) {
        return tableNames.stream().map(viewName -> {
            final NameDateDto dto = new NameDateDto();
            dto.setName(viewName);
            return dto;
        }).collect(Collectors.toList());
    } else {
        final String prefix = String.format("%s_%s_", name.getDatabaseName(),
            MoreObjects.firstNonNull(name.getTableName(), ""));
        return tableNames.stream()
            .filter(qualifiedTableName -> qualifiedTableName.getTableName().startsWith(prefix))
            .map(qualifiedTableName -> {
                final NameDateDto dto = new NameDateDto();
                dto.setName(QualifiedName.ofView(qualifiedTableName.getCatalogName(), name.getDatabaseName(),
                    name.getTableName(), qualifiedTableName.getTableName().substring(prefix.length())));
                return dto;
            }).collect(Collectors.toList());
    }
}
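The prefix handling above implies a storage-name convention for views: a view on a given database/table is stored in the common view database under "<database>_<table>_<view>", and the view name is recovered by stripping that prefix. The sketch below spells out that encode/decode step; the helper names are illustrative only and are not part of metacat's API.

// Illustrative helpers for the "<db>_<table>_<view>" convention implied by the prefix logic above.
public final class ViewNameCodecSketch {

    static String toStorageName(final String databaseName, final String tableName, final String viewName) {
        return String.format("%s_%s_%s", databaseName, tableName, viewName);
    }

    static String toViewName(final String storageName, final String databaseName, final String tableName) {
        final String prefix = String.format("%s_%s_", databaseName, tableName);
        if (!storageName.startsWith(prefix)) {
            throw new IllegalArgumentException(storageName + " does not belong to " + databaseName + "." + tableName);
        }
        return storageName.substring(prefix.length());
    }

    public static void main(final String[] args) {
        final String stored = toStorageName("mydb", "mytable", "daily_view");
        System.out.println(stored);                                // mydb_mytable_daily_view
        System.out.println(toViewName(stored, "mydb", "mytable")); // daily_view
    }
}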
use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
the class PartitionServiceImpl method getPartitionUris.
@Override
public List<String> getPartitionUris(final QualifiedName name, final String filter,
    final List<String> partitionNames, final Sort sort, final Pageable pageable) {
    List<String> result = Lists.newArrayList();
    if (tableService.exists(name)) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        final ConnectorPartitionService service = connectorManager.getPartitionService(name.getCatalogName());
        final GetPartitionsRequestDto requestDto = new GetPartitionsRequestDto();
        requestDto.setFilter(filter);
        requestDto.setPartitionNames(partitionNames);
        final ConnectorContext connectorContext = converterUtil.toConnectorContext(metacatRequestContext);
        try {
            result = service.getPartitionUris(connectorContext, name,
                converterUtil.toPartitionListRequest(requestDto, pageable, sort));
        } catch (final UnsupportedOperationException uoe) {
            log.info("Catalog {} doesn't support getPartitionUris. Ignoring.", name.getCatalogName());
        }
    }
    return result;
}
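The try/catch here treats getPartitionUris as an optional connector capability: connectors that do not implement it throw UnsupportedOperationException and the caller falls back to an empty result. A generic sketch of that pattern follows; the helper is an illustrative stand-in, not part of metacat.

import java.util.Collections;
import java.util.List;
import java.util.function.Supplier;

// Sketch of the optional-capability fallback used above.
public final class OptionalCapabilitySketch {

    static <T> List<T> orEmpty(final Supplier<List<T>> connectorCall, final String catalogName, final String operation) {
        try {
            return connectorCall.get();
        } catch (final UnsupportedOperationException uoe) {
            // Mirrors the log-and-ignore behavior in the service method above
            System.out.printf("Catalog %s doesn't support %s. Ignoring.%n", catalogName, operation);
            return Collections.emptyList();
        }
    }

    public static void main(final String[] args) {
        final List<String> uris = orEmpty(() -> {
            throw new UnsupportedOperationException();
        }, "mycatalog", "getPartitionUris");
        System.out.println(uris); // []
    }
}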
use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
the class PartitionServiceImpl method delete.
@Override
public void delete(final QualifiedName name, final List<String> partitionIds) {
    final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
    registry.gauge(this.partitionDeletedCountId.withTags(new HashMap<>(name.parts())), partitionIds.size());
    if (!tableService.exists(name)) {
        throw new TableNotFoundException(name);
    }
    if (!partitionIds.isEmpty()) {
        final PartitionsSaveRequestDto dto = new PartitionsSaveRequestDto();
        dto.setPartitionIdsForDeletes(partitionIds);
        eventBus.postSync(new MetacatDeleteTablePartitionPreEvent(name, metacatRequestContext, this, dto));
        final ConnectorPartitionService service = connectorManager.getPartitionService(name.getCatalogName());
        // Get the partitions before calling delete
        final GetPartitionsRequestDto requestDto = new GetPartitionsRequestDto();
        requestDto.setIncludePartitionDetails(false);
        requestDto.setPartitionNames(partitionIds);
        final ConnectorContext connectorContext = converterUtil.toConnectorContext(metacatRequestContext);
        final List<PartitionInfo> partitionInfos = service.getPartitions(connectorContext, name,
            converterUtil.toPartitionListRequest(requestDto, null, null));
        List<HasMetadata> partitions = Lists.newArrayList();
        if (partitionInfos != null) {
            partitions = partitionInfos.stream().map(converterUtil::toPartitionDto).collect(Collectors.toList());
        }
        log.info("Deleting partitions with names {} for {}", partitionIds, name);
        service.deletePartitions(connectorContext, name, partitionIds);
        // delete metadata
        log.info("Deleting user metadata for partitions with names {} for {}", partitionIds, name);
        if (!partitions.isEmpty()) {
            deleteMetadatas(metacatRequestContext.getUserName(), partitions);
        }
        eventBus.postAsync(new MetacatDeleteTablePartitionPostEvent(name, metacatRequestContext, this, partitionIds));
    }
}
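Note the event bracketing around the mutation: a synchronous pre-event before the connector delete (so listeners observe the request on the caller's path) and an asynchronous post-event afterwards. A minimal sketch of that pattern follows; the bus interface and string events are illustrative stand-ins, not metacat's eventBus or event classes.

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;

// Sketch of the pre/post event bracketing used in the delete method above.
public final class DeleteEventBracketSketch {

    interface SimpleBus {
        void postSync(Object event);
        void postAsync(Object event);
    }

    static void deleteWithEvents(final SimpleBus bus, final List<String> partitionIds, final Runnable delete) {
        bus.postSync("pre-delete " + partitionIds);   // listeners run before the mutation
        delete.run();                                  // the actual connector delete
        bus.postAsync("post-delete " + partitionIds); // listeners notified afterwards, off the caller's path
    }

    public static void main(final String[] args) {
        final SimpleBus bus = new SimpleBus() {
            @Override public void postSync(final Object event) { System.out.println("sync: " + event); }
            @Override public void postAsync(final Object event) {
                CompletableFuture.runAsync(() -> System.out.println("async: " + event)).join();
            }
        };
        deleteWithEvents(bus, Arrays.asList("dateint=20240101"), () -> System.out.println("deleting..."));
    }
}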
use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
the class PartitionServiceImpl method getQualifiedNames.
@Override
public Map<String, List<QualifiedName>> getQualifiedNames(final List<String> uris, final boolean prefixSearch) {
    final Map<String, List<QualifiedName>> result = Maps.newConcurrentMap();
    final List<ListenableFuture<Void>> futures = Lists.newArrayList();
    final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
    catalogService.getCatalogNames().forEach(catalog -> {
        futures.add(threadServiceManager.getExecutor().submit(() -> {
            final ConnectorPartitionService service = connectorManager.getPartitionService(catalog.getCatalogName());
            final ConnectorContext connectorContext = converterUtil.toConnectorContext(metacatRequestContext);
            try {
                final Map<String, List<QualifiedName>> partitionNames =
                    service.getPartitionNames(connectorContext, uris, prefixSearch);
                partitionNames.forEach((uri, subPartitionNames) -> {
                    final List<QualifiedName> existingPartitionNames = result.get(uri);
                    if (existingPartitionNames == null) {
                        result.put(uri, subPartitionNames);
                    } else {
                        existingPartitionNames.addAll(subPartitionNames);
                    }
                });
            } catch (final UnsupportedOperationException uoe) {
                log.debug("Catalog {} doesn't support getPartitionNames. Ignoring.", catalog.getCatalogName());
            }
            return null;
        }));
    });
    try {
        Futures.allAsList(futures).get(1, TimeUnit.HOURS);
    } catch (Exception e) {
        Throwables.propagate(e);
    }
    return result;
}
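The method above fans one task out per catalog and merges each partial uri-to-names map into a shared concurrent result, then waits on the combined future with a bounded timeout. The standalone sketch below reproduces that fan-out/merge shape with Guava, assuming Guava is on the classpath; the catalog names, URIs, and the stand-in lookup inside the task are illustrative assumptions.

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Sketch of the per-catalog fan-out and concurrent merge used above.
public final class FanOutMergeSketch {

    public static void main(final String[] args) throws Exception {
        final List<String> catalogs = Lists.newArrayList("prodhive", "testhive"); // illustrative catalog names
        final Map<String, List<String>> result = Maps.newConcurrentMap();
        final ListeningExecutorService executor =
            MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(catalogs.size()));
        final List<ListenableFuture<Void>> futures = Lists.newArrayList();
        for (final String catalog : catalogs) {
            futures.add(executor.submit(() -> {
                // Stand-in for service.getPartitionNames(connectorContext, uris, prefixSearch)
                final Map<String, List<String>> partial = Maps.newHashMap();
                partial.put("s3://bucket/data", Lists.newArrayList(catalog + "/db/table/dateint=20240101"));
                // Merge the partial result into the shared concurrent map
                partial.forEach((uri, names) ->
                    result.merge(uri, Lists.newArrayList(names), (existing, incoming) -> {
                        existing.addAll(incoming);
                        return existing;
                    }));
                return null;
            }));
        }
        // Bounded wait on the combined future (the service method above uses a 1-hour limit)
        Futures.allAsList(futures).get(1, TimeUnit.MINUTES);
        executor.shutdown();
        System.out.println(result);
    }
}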