Example 16 with ConnectorException

Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in project metacat by Netflix.

From the class HiveConnectorTableService, method create.

/**
 * Create a table.
 *
 * @param requestContext The request context
 * @param tableInfo      The resource metadata
 */
@Override
public void create(final ConnectorRequestContext requestContext, final TableInfo tableInfo) {
    final QualifiedName tableName = tableInfo.getName();
    try {
        final Table table = hiveMetacatConverters.fromTableInfo(tableInfo);
        updateTable(requestContext, table, tableInfo);
        metacatHiveClient.createTable(table);
    } catch (AlreadyExistsException exception) {
        throw new TableAlreadyExistsException(tableName, exception);
    } catch (MetaException | InvalidObjectException exception) {
        // NoSuchObjectException is converted into InvalidObjectException in the Hive client
        if (exception.getMessage().startsWith(tableName.getDatabaseName())) {
            throw new DatabaseNotFoundException(QualifiedName.ofDatabase(tableName.getCatalogName(), tableName.getDatabaseName()), exception);
        } else {
            // Invalid table name or column definition.
            throw new InvalidMetaException(tableName, exception);
        }
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed create hive table %s", tableName), exception);
    }
}
Also used: TException (org.apache.thrift.TException), TableAlreadyExistsException (com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException), Table (org.apache.hadoop.hive.metastore.api.Table), AlreadyExistsException (org.apache.hadoop.hive.metastore.api.AlreadyExistsException), QualifiedName (com.netflix.metacat.common.QualifiedName), DatabaseNotFoundException (com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException), ConnectorException (com.netflix.metacat.common.server.connectors.exception.ConnectorException), InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException), InvalidMetaException (com.netflix.metacat.common.server.connectors.exception.InvalidMetaException), MetaException (org.apache.hadoop.hive.metastore.api.MetaException)
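
Given that exception translation, a caller can branch on metacat's domain exceptions instead of raw Thrift types. A minimal sketch; the wrapper class and its createIfAbsent method are hypothetical, not part of the project:

import com.netflix.metacat.common.server.connectors.ConnectorRequestContext;
import com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException;
import com.netflix.metacat.common.server.connectors.model.TableInfo;
import com.netflix.metacat.connector.hive.HiveConnectorTableService;

public class CreateTableCaller {
    private final HiveConnectorTableService tableService;

    public CreateTableCaller(final HiveConnectorTableService tableService) {
        this.tableService = tableService;
    }

    /**
     * Creates the table, treating "already exists" as success so the call is idempotent.
     */
    public void createIfAbsent(final ConnectorRequestContext context, final TableInfo tableInfo) {
        try {
            tableService.create(context, tableInfo);
        } catch (TableAlreadyExistsException ignored) {
            // Another writer won the race; treat create as idempotent.
        }
        // DatabaseNotFoundException, InvalidMetaException, and ConnectorException
        // propagate unchanged: they signal conditions the caller must handle.
    }
}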

Example 17 with ConnectorException

Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in project metacat by Netflix.

From the class HiveConnectorTableService, method list.

/**
 * {@inheritDoc}.
 */
@Override
public List<TableInfo> list(final ConnectorRequestContext requestContext, final QualifiedName name, @Nullable final QualifiedName prefix, @Nullable final Sort sort, @Nullable final Pageable pageable) {
    try {
        final List<TableInfo> tableInfos = Lists.newArrayList();
        for (String tableName : metacatHiveClient.getAllTables(name.getDatabaseName())) {
            final QualifiedName qualifiedName = QualifiedName.ofDatabase(name.getCatalogName(), tableName);
            if (prefix != null && !qualifiedName.toString().startsWith(prefix.toString())) {
                continue;
            }
            final Table table = metacatHiveClient.getTableByName(name.getDatabaseName(), tableName);
            tableInfos.add(hiveMetacatConverters.toTableInfo(name, table));
        }
        // Only sorting by table name is supported.
        if (sort != null) {
            ConnectorUtils.sort(tableInfos, sort, Comparator.comparing(p -> p.getName().getTableName()));
        }
        return ConnectorUtils.paginate(tableInfos, pageable);
    } catch (MetaException exception) {
        throw new DatabaseNotFoundException(name, exception);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed list hive table %s", name), exception);
    }
}
Also used: Getter (lombok.Getter), MetaException (org.apache.hadoop.hive.metastore.api.MetaException), HiveConfigConstants (com.netflix.metacat.connector.hive.util.HiveConfigConstants), SerDeInfo (org.apache.hadoop.hive.metastore.api.SerDeInfo), DatabaseNotFoundException (com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException), AlreadyExistsException (org.apache.hadoop.hive.metastore.api.AlreadyExistsException), Strings (com.google.common.base.Strings), ConnectorTableService (com.netflix.metacat.common.server.connectors.ConnectorTableService), InvalidMetaException (com.netflix.metacat.common.server.connectors.exception.InvalidMetaException), FieldInfo (com.netflix.metacat.common.server.connectors.model.FieldInfo), Lists (com.google.common.collect.Lists), ImmutableList (com.google.common.collect.ImmutableList), ConnectorException (com.netflix.metacat.common.server.connectors.exception.ConnectorException), MetacatBadRequestException (com.netflix.metacat.common.exception.MetacatBadRequestException), Map (java.util.Map), Path (org.apache.hadoop.fs.Path), ConnectorContext (com.netflix.metacat.common.server.connectors.ConnectorContext), StorageInfo (com.netflix.metacat.common.server.connectors.model.StorageInfo), HiveConnectorInfoConverter (com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter), StorageDescriptor (org.apache.hadoop.hive.metastore.api.StorageDescriptor), ConnectorRequestContext (com.netflix.metacat.common.server.connectors.ConnectorRequestContext), Nullable (javax.annotation.Nullable), ImmutableMap (com.google.common.collect.ImmutableMap), Pageable (com.netflix.metacat.common.dto.Pageable), TException (org.apache.thrift.TException), QualifiedName (com.netflix.metacat.common.QualifiedName), InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException), TableNotFoundException (com.netflix.metacat.common.server.connectors.exception.TableNotFoundException), Maps (com.google.common.collect.Maps), Collectors (java.util.stream.Collectors), Table (org.apache.hadoop.hive.metastore.api.Table), FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema), Slf4j (lombok.extern.slf4j.Slf4j), List (java.util.List), TableInfo (com.netflix.metacat.common.server.connectors.model.TableInfo), HiveTableUtil (com.netflix.metacat.connector.hive.util.HiveTableUtil), TableAlreadyExistsException (com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException), TableType (org.apache.hadoop.hive.metastore.TableType), ConnectorUtils (com.netflix.metacat.common.server.connectors.ConnectorUtils), Comparator (java.util.Comparator), Collections (java.util.Collections), NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException), Sort (com.netflix.metacat.common.dto.Sort)
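
The prefix filter, sort, and paginate steps above are ordinary collection operations. A self-contained sketch of the same pipeline shape, with a hypothetical helper standing in for ConnectorUtils.sort and ConnectorUtils.paginate (the offset/limit semantics are an assumption, not metacat's actual Pageable contract):

import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

public final class ListingUtils {
    private ListingUtils() {
    }

    /**
     * Filters names by prefix, sorts them, and returns a single page of results,
     * mirroring the prefix/sort/paginate pipeline in HiveConnectorTableService.list.
     */
    public static List<String> page(final List<String> names, final String prefix,
                                    final long offset, final long limit) {
        return names.stream()
            .filter(n -> prefix == null || n.startsWith(prefix))
            .sorted(Comparator.naturalOrder())
            .skip(offset)
            .limit(limit)
            .collect(Collectors.toList());
    }
}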

Example 18 with ConnectorException

Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in project metacat by Netflix.

From the class HiveConnectorTableService, method rename.

/**
 * {@inheritDoc}.
 */
@Override
public void rename(final ConnectorRequestContext context, final QualifiedName oldName, final QualifiedName newName) {
    if (!allowRenameTable) {
        throw new ConnectorException("Renaming tables is disabled in catalog " + catalogName, null);
    }
    try {
        if (onRenameConvertToExternal) {
            // 
            // If this is a managed table (EXTERNAL=FALSE), convert it to an external table before renaming it.
            // We do not want the metastore to move the location/data.
            // 
            final Table table = metacatHiveClient.getTableByName(oldName.getDatabaseName(), oldName.getTableName());
            Map<String, String> parameters = table.getParameters();
            if (parameters == null) {
                parameters = Maps.newHashMap();
                table.setParameters(parameters);
            }
            if (!parameters.containsKey(PARAMETER_EXTERNAL) || parameters.get(PARAMETER_EXTERNAL).equalsIgnoreCase("FALSE")) {
                parameters.put(PARAMETER_EXTERNAL, "TRUE");
                metacatHiveClient.alterTable(oldName.getDatabaseName(), oldName.getTableName(), table);
            }
        }
        metacatHiveClient.rename(oldName.getDatabaseName(), oldName.getTableName(), newName.getDatabaseName(), newName.getTableName());
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(oldName, exception);
    } catch (MetaException exception) {
        throw new InvalidMetaException(newName, exception);
    } catch (TException exception) {
        throw new ConnectorException("Failed renaming from hive table" + oldName.toString() + " to hive talbe " + newName.toString(), exception);
    }
}
Also used: TException (org.apache.thrift.TException), TableNotFoundException (com.netflix.metacat.common.server.connectors.exception.TableNotFoundException), Table (org.apache.hadoop.hive.metastore.api.Table), ConnectorException (com.netflix.metacat.common.server.connectors.exception.ConnectorException), NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException), InvalidMetaException (com.netflix.metacat.common.server.connectors.exception.InvalidMetaException), MetaException (org.apache.hadoop.hive.metastore.api.MetaException)
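
The managed-to-external conversion above hinges on Hive's EXTERNAL table parameter. The same check in isolation, as a sketch: the helper class is hypothetical, and PARAMETER_EXTERNAL is assumed to be the literal "EXTERNAL" used by the Hive metastore.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.metastore.api.Table;

public final class ExternalTableUtil {
    /** Hive's table-parameter key; assumed to match PARAMETER_EXTERNAL above. */
    private static final String PARAMETER_EXTERNAL = "EXTERNAL";

    private ExternalTableUtil() {
    }

    /**
     * Marks a managed table as external so the metastore will not move its data
     * on rename. Returns true if the table was modified.
     */
    public static boolean convertToExternal(final Table table) {
        Map<String, String> parameters = table.getParameters();
        if (parameters == null) {
            parameters = new HashMap<>();
            table.setParameters(parameters);
        }
        final String external = parameters.get(PARAMETER_EXTERNAL);
        if (external == null || "FALSE".equalsIgnoreCase(external)) {
            parameters.put(PARAMETER_EXTERNAL, "TRUE");
            return true;
        }
        return false;
    }
}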

Example 19 with ConnectorException

Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in project metacat by Netflix.

From the class DirectSqlSavePartition, method _insert.

@SuppressWarnings("checkstyle:methodname")
private void _insert(final QualifiedName tableQName, final Table table, final TableSequenceIds tableSequenceIds, final PartitionSequenceIds partitionSequenceIds, final List<PartitionInfo> partitions, final long currentTimeInEpoch, final int index) {
    final List<Object[]> serdesValues = Lists.newArrayList();
    final List<Object[]> serdeParamsValues = Lists.newArrayList();
    final List<Object[]> sdsValues = Lists.newArrayList();
    final List<Object[]> partitionsValues = Lists.newArrayList();
    final List<Object[]> partitionParamsValues = Lists.newArrayList();
    final List<Object[]> partitionKeyValsValues = Lists.newArrayList();
    final List<String> partitionNames = Lists.newArrayList();
    int currentIndex = index;
    for (PartitionInfo partition : partitions) {
        final StorageInfo storageInfo = partition.getSerde();
        final long partId = partitionSequenceIds.getPartId() + currentIndex;
        final long sdsId = partitionSequenceIds.getSdsId() + currentIndex;
        final long serdeId = partitionSequenceIds.getSerdeId() + currentIndex;
        final String partitionName = partition.getName().getPartitionName();
        final List<String> partValues = PartitionUtil.getPartValuesFromPartName(tableQName, table, partitionName);
        final String escapedPartName = PartitionUtil.makePartName(table.getPartitionKeys(), partValues);
        partitionsValues.add(new Object[] { 0, tableSequenceIds.getTableId(), currentTimeInEpoch, sdsId, escapedPartName, partId });
        for (int i = 0; i < partValues.size(); i++) {
            partitionKeyValsValues.add(new Object[] { partId, partValues.get(i), i });
        }
        // Partition parameters
        final Map<String, String> parameters = partition.getMetadata();
        if (parameters != null) {
            parameters.forEach((key, value) -> partitionParamsValues.add(new Object[] { value, partId, key }));
        }
        partitionParamsValues.add(new Object[] { currentTimeInEpoch, partId, PARAM_LAST_DDL_TIME });
        if (storageInfo != null) {
            serdesValues.add(new Object[] { null, storageInfo.getSerializationLib(), serdeId });
            final Map<String, String> serdeInfoParameters = storageInfo.getSerdeInfoParameters();
            if (serdeInfoParameters != null) {
                serdeInfoParameters.forEach((key, value) -> serdeParamsValues.add(new Object[] { value, serdeId, key }));
            }
            sdsValues.add(new Object[] { storageInfo.getOutputFormat(), false, tableSequenceIds.getCdId(), false, serdeId, storageInfo.getUri(), storageInfo.getInputFormat(), 0, sdsId });
        }
        partitionNames.add(partitionName);
        currentIndex++;
    }
    try {
        jdbcTemplate.batchUpdate(SQL.SERDES_INSERT, serdesValues, new int[] { Types.VARCHAR, Types.VARCHAR, Types.BIGINT });
        jdbcTemplate.batchUpdate(SQL.SERDE_PARAMS_INSERT, serdeParamsValues, new int[] { Types.VARCHAR, Types.BIGINT, Types.VARCHAR });
        jdbcTemplate.batchUpdate(SQL.SDS_INSERT, sdsValues, new int[] { Types.VARCHAR, Types.BOOLEAN, Types.BIGINT, Types.BOOLEAN, Types.BIGINT, Types.VARCHAR, Types.VARCHAR, Types.INTEGER, Types.BIGINT });
        jdbcTemplate.batchUpdate(SQL.PARTITIONS_INSERT, partitionsValues, new int[] { Types.INTEGER, Types.BIGINT, Types.INTEGER, Types.BIGINT, Types.VARCHAR, Types.BIGINT });
        jdbcTemplate.batchUpdate(SQL.PARTITION_PARAMS_INSERT, partitionParamsValues, new int[] { Types.VARCHAR, Types.BIGINT, Types.VARCHAR });
        jdbcTemplate.batchUpdate(SQL.PARTITION_KEY_VALS_INSERT, partitionKeyValsValues, new int[] { Types.BIGINT, Types.VARCHAR, Types.INTEGER });
    } catch (DuplicateKeyException e) {
        throw new PartitionAlreadyExistsException(tableQName, partitionNames, e);
    } catch (Exception e) {
        throw new ConnectorException(String.format("Failed inserting partitions %s for table %s", partitionNames, tableQName), e);
    }
}
Also used: DuplicateKeyException (org.springframework.dao.DuplicateKeyException), ConnectorException (com.netflix.metacat.common.server.connectors.exception.ConnectorException), EmptyResultDataAccessException (org.springframework.dao.EmptyResultDataAccessException), TableNotFoundException (com.netflix.metacat.common.server.connectors.exception.TableNotFoundException), PartitionAlreadyExistsException (com.netflix.metacat.common.server.connectors.exception.PartitionAlreadyExistsException), StorageInfo (com.netflix.metacat.common.server.connectors.model.StorageInfo), PartitionInfo (com.netflix.metacat.common.server.connectors.model.PartitionInfo)
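
The catch clauses here rely on Spring's data-access exception translation: JdbcTemplate.batchUpdate raises DuplicateKeyException on a unique-key violation, which maps naturally onto "partition already exists". A minimal sketch of the same pattern; the table and column names are illustrative, not the project's actual SQL:

import java.sql.Types;
import java.util.List;

import org.springframework.dao.DuplicateKeyException;
import org.springframework.jdbc.core.JdbcTemplate;

public class PartitionParamsWriter {
    /** Illustrative statement; the project's real SQL lives in DirectSqlSavePartition.SQL. */
    private static final String INSERT_SQL =
        "INSERT INTO PARTITION_PARAMS (PARAM_VALUE, PART_ID, PARAM_KEY) VALUES (?, ?, ?)";

    private final JdbcTemplate jdbcTemplate;

    public PartitionParamsWriter(final JdbcTemplate jdbcTemplate) {
        this.jdbcTemplate = jdbcTemplate;
    }

    /**
     * Batch-inserts parameter rows; a unique-key violation surfaces as
     * DuplicateKeyException, which the caller can translate to a domain exception.
     */
    public void insert(final List<Object[]> rows) {
        try {
            jdbcTemplate.batchUpdate(INSERT_SQL, rows,
                new int[] {Types.VARCHAR, Types.BIGINT, Types.VARCHAR});
        } catch (DuplicateKeyException e) {
            throw new IllegalStateException("Partition parameters already exist", e);
        }
    }
}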

Example 20 with ConnectorException

Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in project metacat by Netflix.

From the class DirectSqlSavePartition, method _update.

@SuppressWarnings("checkstyle:methodname")
private void _update(final QualifiedName tableQName, final List<PartitionHolder> partitionHolders, final long currentTimeInEpoch) {
    final List<Object[]> serdesValues = Lists.newArrayList();
    final List<Object[]> serdeParamsValues = Lists.newArrayList();
    final List<Object[]> sdsValues = Lists.newArrayList();
    final List<Object[]> partitionParamsValues = Lists.newArrayList();
    final List<String> partitionNames = Lists.newArrayList();
    for (PartitionHolder partitionHolder : partitionHolders) {
        final PartitionInfo partition = partitionHolder.getPartitionInfo();
        final StorageInfo storageInfo = partition.getSerde();
        final long partId = partitionHolder.getId();
        final long sdsId = partitionHolder.getSdId();
        final long serdeId = partitionHolder.getSerdeId();
        // Partition parameters
        final Map<String, String> parameters = partition.getMetadata();
        if (parameters != null) {
            parameters.forEach((key, value) -> partitionParamsValues.add(new Object[] { value, partId, key, value }));
        }
        partitionParamsValues.add(new Object[] { currentTimeInEpoch, partId, PARAM_LAST_DDL_TIME, currentTimeInEpoch });
        if (storageInfo != null) {
            serdesValues.add(new Object[] { null, storageInfo.getSerializationLib(), serdeId });
            final Map<String, String> serdeInfoParameters = storageInfo.getSerdeInfoParameters();
            if (serdeInfoParameters != null) {
                serdeInfoParameters.forEach((key, value) -> serdeParamsValues.add(new Object[] { value, serdeId, key, value }));
            }
            sdsValues.add(new Object[] { storageInfo.getOutputFormat(), false, false, storageInfo.getUri(), storageInfo.getInputFormat(), sdsId });
        }
        partitionNames.add(partition.getName().toString());
    }
    try {
        jdbcTemplate.batchUpdate(SQL.SERDES_UPDATE, serdesValues, new int[] { Types.VARCHAR, Types.VARCHAR, Types.BIGINT });
        jdbcTemplate.batchUpdate(SQL.SERDE_PARAMS_INSERT_UPDATE, serdeParamsValues, new int[] { Types.VARCHAR, Types.BIGINT, Types.VARCHAR, Types.VARCHAR });
        jdbcTemplate.batchUpdate(SQL.SDS_UPDATE, sdsValues, new int[] { Types.VARCHAR, Types.BOOLEAN, Types.BOOLEAN, Types.VARCHAR, Types.VARCHAR, Types.BIGINT });
        jdbcTemplate.batchUpdate(SQL.PARTITION_PARAMS_INSERT_UPDATE, partitionParamsValues, new int[] { Types.VARCHAR, Types.BIGINT, Types.VARCHAR, Types.VARCHAR });
    } catch (DuplicateKeyException e) {
        throw new PartitionAlreadyExistsException(tableQName, partitionNames, e);
    } catch (Exception e) {
        throw new ConnectorException(String.format("Failed updating partitions %s for table %s", partitionNames, tableQName), e);
    }
}
Also used: DuplicateKeyException (org.springframework.dao.DuplicateKeyException), ConnectorException (com.netflix.metacat.common.server.connectors.exception.ConnectorException), EmptyResultDataAccessException (org.springframework.dao.EmptyResultDataAccessException), TableNotFoundException (com.netflix.metacat.common.server.connectors.exception.TableNotFoundException), PartitionAlreadyExistsException (com.netflix.metacat.common.server.connectors.exception.PartitionAlreadyExistsException), StorageInfo (com.netflix.metacat.common.server.connectors.model.StorageInfo), PartitionInfo (com.netflix.metacat.common.server.connectors.model.PartitionInfo)
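
Note that each partition-parameter row in _update carries the value twice ({ value, partId, key, value }): once for the insert column list and once for the update clause of an upsert. The project's SQL constants are not shown on this page; a hypothetical MySQL-style statement matching that parameter shape:

    // Hypothetical upsert matching the four-parameter rows (value, partId, key, value);
    // the real statement lives in DirectSqlSavePartition.SQL.
    private static final String PARTITION_PARAMS_INSERT_UPDATE =
        "INSERT INTO PARTITION_PARAMS (PARAM_VALUE, PART_ID, PARAM_KEY) "
            + "VALUES (?, ?, ?) ON DUPLICATE KEY UPDATE PARAM_VALUE = ?";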

Aggregations

ConnectorException (com.netflix.metacat.common.server.connectors.exception.ConnectorException): 46 usages
InvalidMetaException (com.netflix.metacat.common.server.connectors.exception.InvalidMetaException): 35 usages
QualifiedName (com.netflix.metacat.common.QualifiedName): 26 usages
TableNotFoundException (com.netflix.metacat.common.server.connectors.exception.TableNotFoundException): 25 usages
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 24 usages
TException (org.apache.thrift.TException): 24 usages
NoSuchObjectException (org.apache.hadoop.hive.metastore.api.NoSuchObjectException): 20 usages
Table (org.apache.hadoop.hive.metastore.api.Table): 17 usages
InvalidObjectException (org.apache.hadoop.hive.metastore.api.InvalidObjectException): 16 usages
Pageable (com.netflix.metacat.common.dto.Pageable): 15 usages
DatabaseNotFoundException (com.netflix.metacat.common.server.connectors.exception.DatabaseNotFoundException): 15 usages
TableAlreadyExistsException (com.netflix.metacat.common.server.connectors.exception.TableAlreadyExistsException): 13 usages
Sort (com.netflix.metacat.common.dto.Sort): 12 usages
List (java.util.List): 12 usages
Nullable (javax.annotation.Nullable): 12 usages
Lists (com.google.common.collect.Lists): 11 usages
ConnectorContext (com.netflix.metacat.common.server.connectors.ConnectorContext): 11 usages
TableInfo (com.netflix.metacat.common.server.connectors.model.TableInfo): 11 usages
ConnectorUtils (com.netflix.metacat.common.server.connectors.ConnectorUtils): 10 usages
HiveConnectorInfoConverter (com.netflix.metacat.connector.hive.converters.HiveConnectorInfoConverter): 10 usages