Use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
From the class PartitionServiceImpl, method getPartitionKeys:
@Override
public List<String> getPartitionKeys(final QualifiedName name, final String filter,
                                     final List<String> partitionNames, final Sort sort,
                                     final Pageable pageable) {
    List<String> result = Lists.newArrayList();
    if (tableService.exists(name)) {
        final MetacatRequestContext metacatRequestContext = MetacatContextManager.getContext();
        final ConnectorPartitionService service =
            connectorManager.getPartitionService(name.getCatalogName());
        final GetPartitionsRequestDto requestDto = new GetPartitionsRequestDto();
        requestDto.setFilter(filter);
        requestDto.setPartitionNames(partitionNames);
        final ConnectorContext connectorContext = converterUtil.toConnectorContext(metacatRequestContext);
        try {
            result = service.getPartitionKeys(connectorContext, name,
                converterUtil.toPartitionListRequest(requestDto, pageable, sort));
        } catch (final UnsupportedOperationException uoe) {
            log.debug("Catalog {} doesn't support getPartitionKeys. Ignoring.", name.getCatalogName());
        }
    }
    return result;
}
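For orientation, here is a hedged sketch of how a caller could invoke this method; the catalog, table, filter string, and the Sort/Pageable values are illustrative assumptions, not taken from the metacat source.

// Hypothetical caller; all names and values below are illustrative assumptions.
final QualifiedName table = QualifiedName.ofTable("prodhive", "logs", "playback_events");
final Sort sort = new Sort("name", SortOrder.ASC);
// First page of up to 100 partition keys.
final Pageable pageable = new Pageable(100, 0);
final List<String> keys =
    partitionService.getPartitionKeys(table, "dateint>=20230101", null, sort, pageable);
keys.forEach(System.out::println);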
Use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
From the class HiveConnectorFastPartitionService, method getPartitionCount:
/**
 * Number of partitions for the given table.
 *
 * @param requestContext connector request context
 * @param tableName      table name
 * @return number of partitions
 */
@Override
public int getPartitionCount(@Nonnull @NonNull final ConnectorContext requestContext,
                             @Nonnull @NonNull final QualifiedName tableName) {
    final long start = registry.clock().monotonicTime();
    final Map<String, String> tags = new HashMap<>();
    tags.put("request", HiveMetrics.getPartitionCount.name());
    final Integer result;
    final DataSource dataSource = DataSourceManager.get().get(catalogName);
    try (Connection conn = dataSource.getConnection()) {
        // Handler that reads the single-row count from the result set
        final ResultSetHandler<Integer> handler = rs -> {
            int count = 0;
            while (rs.next()) {
                count = rs.getInt("count");
            }
            return count;
        };
        result = new QueryRunner().query(conn, SQL_GET_PARTITION_COUNT, handler,
            tableName.getDatabaseName(), tableName.getTableName());
    } catch (SQLException e) {
        throw new ConnectorException("getPartitionCount", e);
    } finally {
        // monotonicTime() is in nanoseconds; convert to milliseconds so the
        // log message and the timer's TimeUnit.MILLISECONDS agree.
        final long duration = (registry.clock().monotonicTime() - start) / 1_000_000;
        log.debug("### Time taken to complete getPartitionCount is {} ms", duration);
        this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
    }
    return result;
}
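SQL_GET_PARTITION_COUNT is not shown in this excerpt. Below is a minimal sketch of what the constant could look like, assuming the standard Hive metastore schema (DBS, TBLS, PARTITIONS) and matching the bind order of the query(...) call above (database name first, then table name); the real metacat SQL may differ.

// Assumed sketch; the column alias must match rs.getInt("count") in the handler.
private static final String SQL_GET_PARTITION_COUNT =
    "SELECT COUNT(*) AS count"
    + " FROM PARTITIONS p"
    + " JOIN TBLS t ON p.TBL_ID = t.TBL_ID"
    + " JOIN DBS d ON t.DB_ID = d.DB_ID"
    + " WHERE d.NAME = ? AND t.TBL_NAME = ?";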
Use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
From the class HiveConnectorTableService, method list:
/**
 * {@inheritDoc}.
 */
@Override
public List<TableInfo> list(@Nonnull @NonNull final ConnectorContext requestContext,
                            @Nonnull @NonNull final QualifiedName name,
                            @Nullable final QualifiedName prefix,
                            @Nullable final Sort sort,
                            @Nullable final Pageable pageable) {
    try {
        final List<TableInfo> tableInfos = Lists.newArrayList();
        for (String tableName : metacatHiveClient.getAllTables(name.getDatabaseName())) {
            final QualifiedName qualifiedName = QualifiedName.ofDatabase(name.getCatalogName(), tableName);
            // prefix is @Nullable; guard against an NPE when no prefix filter is given
            if (prefix != null && !qualifiedName.toString().startsWith(prefix.toString())) {
                continue;
            }
            final Table table = metacatHiveClient.getTableByName(name.getDatabaseName(), tableName);
            tableInfos.add(hiveMetacatConverters.toTableInfo(name, table));
        }
        // Supporting sort by table name only
        if (sort != null) {
            ConnectorUtils.sort(tableInfos, sort, Comparator.comparing(p -> p.getName().getTableName()));
        }
        return ConnectorUtils.paginate(tableInfos, pageable);
    } catch (MetaException exception) {
        throw new DatabaseNotFoundException(name, exception);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed to list hive tables %s", name), exception);
    }
}
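ConnectorUtils.paginate is likewise not shown here. A minimal sketch of such an in-memory pagination helper, assuming Pageable exposes getOffset() and getLimit(); this is an illustration, not the actual metacat implementation.

// Minimal sketch of in-memory pagination; assumes a null Pageable means "no paging".
public static <T> List<T> paginate(final List<T> items, @Nullable final Pageable pageable) {
    if (pageable == null) {
        return items;
    }
    final int from = Math.min(pageable.getOffset(), items.size());
    final int to = Math.min(from + pageable.getLimit(), items.size());
    return Lists.newArrayList(items.subList(from, to));
}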
Use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
From the class CatalogManager, method loadCatalog:
private void loadCatalog(final File file) throws Exception {
    log.info("-- Loading catalog {} --", file);
    final Map<String, String> properties = new HashMap<>(this.loadProperties(file));
    final String connectorType = properties.remove(MetacatCatalogConfig.Keys.CONNECTOR_NAME);
    Preconditions.checkState(connectorType != null,
        "Catalog configuration %s does not contain connector.name", file.getAbsoluteFile());
    // Catalog shard names should be unique. Usually the catalog name is the same as the
    // catalog shard name. If multiple catalog property files refer to the same catalog name,
    // there will be multiple shard names with the same catalog name.
    final String catalogShardName = Files.getNameWithoutExtension(file.getName());
    // If the catalog name is not specified, use the catalog shard name.
    final String catalogName = properties.getOrDefault(MetacatCatalogConfig.Keys.CATALOG_NAME, catalogShardName);
    final ConnectorContext connectorContext =
        new ConnectorContext(catalogName, catalogShardName, connectorType, config, registry, properties);
    this.connectorManager.createConnection(connectorContext);
    log.info("-- Added catalog {} shard {} using connector {} --", catalogName, catalogShardName, connectorType);
}
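As an illustration, a catalog shard file consumed by this method could look like the following; the file name, the catalog.name key, and the hive.* properties are assumptions for the sketch, while connector.name is the key checked above.

# Hypothetical shard file: prodhive.properties (the shard name becomes "prodhive")
connector.name=hive
# Optional; falls back to the shard name when omitted (assuming this is the CATALOG_NAME key).
catalog.name=prodhive
# All remaining entries are passed through to the connector as-is.
hive.metastore.uris=thrift://metastore.example.com:9083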
Use of com.netflix.metacat.common.server.connectors.ConnectorContext in project metacat by Netflix.
From the class PolarisConnectorTableServiceTest, method init:
/**
 * Initialization.
 */
@BeforeEach
public void init() {
    connectorContext = new ConnectorContext(CATALOG_NAME, CATALOG_NAME, "polaris",
        new DefaultConfigImpl(new MetacatProperties()), new NoopRegistry(), null, Maps.newHashMap());
    polarisDBService = new PolarisConnectorDatabaseService(polarisStoreService, connectorContext);
    polarisTableService = new PolarisConnectorTableService(
        polarisStoreService,
        CATALOG_NAME,
        polarisDBService,
        new HiveConnectorInfoConverter(new HiveTypeConverter()),
        new IcebergTableHandler(connectorContext,
            new IcebergTableCriteriaImpl(connectorContext),
            new IcebergTableOpWrapper(connectorContext, serviceManager),
            new IcebergTableOpsProxy()),
        new PolarisTableMapper(CATALOG_NAME),
        connectorContext);
}
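With this wiring in place, a follow-up test could exercise the service roughly as below; the database/table names, the no-arg ConnectorRequestContext, and the TableInfo builder usage are assumptions for illustration, not taken from the metacat source.

// Hypothetical test; API details here are assumptions.
@Test
public void testCreateThenGet() {
    final QualifiedName name = QualifiedName.ofTable(CATALOG_NAME, "testdb", "testtable");
    final TableInfo info = TableInfo.builder().name(name).build();
    polarisTableService.create(new ConnectorRequestContext(), info);
    Assertions.assertEquals(name,
        polarisTableService.get(new ConnectorRequestContext(), name).getName());
}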