Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in project metacat by Netflix.
The class HiveConnectorTableService, method create.
/**
 * Create a table.
 *
 * @param requestContext The request context
 * @param tableInfo      The resource metadata
 */
@Override
public void create(final ConnectorRequestContext requestContext, final TableInfo tableInfo) {
    final QualifiedName tableName = tableInfo.getName();
    try {
        final Table table = hiveMetacatConverters.fromTableInfo(tableInfo);
        updateTable(requestContext, table, tableInfo);
        metacatHiveClient.createTable(table);
    } catch (AlreadyExistsException exception) {
        throw new TableAlreadyExistsException(tableName, exception);
    } catch (MetaException | InvalidObjectException exception) {
        // NoSuchObjectException is converted into InvalidObjectException in the hive client.
        if (exception.getMessage().startsWith(tableName.getDatabaseName())) {
            throw new DatabaseNotFoundException(
                QualifiedName.ofDatabase(tableName.getCatalogName(), tableName.getDatabaseName()), exception);
        } else {
            // Invalid table name or column definition.
            throw new InvalidMetaException(tableName, exception);
        }
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed to create hive table %s", tableName), exception);
    }
}
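The method's value is its exception translation: Thrift-level failures (AlreadyExistsException, MetaException, InvalidObjectException, TException) are mapped to Metacat connector exceptions that carry the qualified table name. A minimal caller sketch, assuming a wired-up HiveConnectorTableService named tableService and an SLF4J logger (hypothetical names, not part of the snippet above):

// Sketch only: tableService, requestContext, tableInfo and log are assumed to exist.
try {
    tableService.create(requestContext, tableInfo);
} catch (TableAlreadyExistsException e) {
    // Another writer won the race; treat as a conflict rather than a hard failure.
    log.warn("Table {} already exists", tableInfo.getName());
} catch (DatabaseNotFoundException e) {
    // The parent database is missing; it has to be created before the table.
    throw e;
}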
Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in project metacat by Netflix.
The class HiveConnectorTableService, method list.
/**
* {@inheritDoc}.
*/
@Override
public List<TableInfo> list(final ConnectorRequestContext requestContext, final QualifiedName name,
                            @Nullable final QualifiedName prefix, @Nullable final Sort sort,
                            @Nullable final Pageable pageable) {
    try {
        final List<TableInfo> tableInfos = Lists.newArrayList();
        for (String tableName : metacatHiveClient.getAllTables(name.getDatabaseName())) {
            // Build the fully qualified table name so the prefix filter compares like with like.
            final QualifiedName qualifiedName =
                QualifiedName.ofTable(name.getCatalogName(), name.getDatabaseName(), tableName);
            if (prefix != null && !qualifiedName.toString().startsWith(prefix.toString())) {
                continue;
            }
            final Table table = metacatHiveClient.getTableByName(name.getDatabaseName(), tableName);
            tableInfos.add(hiveMetacatConverters.toTableInfo(name, table));
        }
        // Only sorting by table name is supported.
        if (sort != null) {
            ConnectorUtils.sort(tableInfos, sort, Comparator.comparing(p -> p.getName().getTableName()));
        }
        return ConnectorUtils.paginate(tableInfos, pageable);
    } catch (MetaException exception) {
        throw new DatabaseNotFoundException(name, exception);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed to list hive tables for %s", name), exception);
    }
}
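A usage sketch, assuming Sort takes a field name plus a SortOrder and Pageable takes a limit and an offset; catalog, database and prefix names here are hypothetical:

// List tables in prodhive/mydb whose qualified name starts with the prefix,
// sorted by table name, returning the first 25 results.
final QualifiedName database = QualifiedName.ofDatabase("prodhive", "mydb");
final QualifiedName prefix = QualifiedName.ofTable("prodhive", "mydb", "log_");
final List<TableInfo> tables = tableService.list(requestContext, database, prefix,
    new Sort("name", SortOrder.ASC), new Pageable(25, 0));

Note that the prefix filter is applied client-side after getAllTables returns, so a large database pays the full listing and per-table fetch cost regardless of how selective the prefix is.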
Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in project metacat by Netflix.
The class HiveConnectorTableService, method rename.
/**
* {@inheritDoc}.
*/
@Override
public void rename(final ConnectorRequestContext context, final QualifiedName oldName, final QualifiedName newName) {
    if (!allowRenameTable) {
        throw new ConnectorException("Renaming tables is disabled in catalog " + catalogName, null);
    }
    try {
        if (onRenameConvertToExternal) {
            //
            // If this is a managed table (EXTERNAL=FALSE), convert it to an external table before
            // renaming it. We do not want the metastore to move the location/data.
            //
            final Table table = metacatHiveClient.getTableByName(oldName.getDatabaseName(), oldName.getTableName());
            Map<String, String> parameters = table.getParameters();
            if (parameters == null) {
                parameters = Maps.newHashMap();
                table.setParameters(parameters);
            }
            if (!parameters.containsKey(PARAMETER_EXTERNAL)
                || parameters.get(PARAMETER_EXTERNAL).equalsIgnoreCase("FALSE")) {
                parameters.put(PARAMETER_EXTERNAL, "TRUE");
                metacatHiveClient.alterTable(oldName.getDatabaseName(), oldName.getTableName(), table);
            }
        }
        metacatHiveClient.rename(oldName.getDatabaseName(), oldName.getTableName(),
            newName.getDatabaseName(), newName.getTableName());
    } catch (NoSuchObjectException exception) {
        throw new TableNotFoundException(oldName, exception);
    } catch (MetaException exception) {
        throw new InvalidMetaException(newName, exception);
    } catch (TException exception) {
        throw new ConnectorException("Failed renaming hive table " + oldName + " to hive table " + newName,
            exception);
    }
}
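A usage sketch (hypothetical catalog and table names). With onRenameConvertToExternal enabled, a managed table is flipped to EXTERNAL=TRUE first, so the metastore rename leaves the underlying data directory in place:

final QualifiedName oldName = QualifiedName.ofTable("prodhive", "mydb", "raw_events");
final QualifiedName newName = QualifiedName.ofTable("prodhive", "mydb", "raw_events_v2");
// Throws ConnectorException immediately if renames are disabled for this catalog.
tableService.rename(requestContext, oldName, newName);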
Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in project metacat by Netflix.
The class DirectSqlSavePartition, method _insert.
@SuppressWarnings("checkstyle:methodname")
private void _insert(final QualifiedName tableQName, final Table table, final TableSequenceIds tableSequenceIds,
                     final PartitionSequenceIds partitionSequenceIds, final List<PartitionInfo> partitions,
                     final long currentTimeInEpoch, final int index) {
    final List<Object[]> serdesValues = Lists.newArrayList();
    final List<Object[]> serdeParamsValues = Lists.newArrayList();
    final List<Object[]> sdsValues = Lists.newArrayList();
    final List<Object[]> partitionsValues = Lists.newArrayList();
    final List<Object[]> partitionParamsValues = Lists.newArrayList();
    final List<Object[]> partitionKeyValsValues = Lists.newArrayList();
    final List<String> partitionNames = Lists.newArrayList();
    int currentIndex = index;
    for (PartitionInfo partition : partitions) {
        final StorageInfo storageInfo = partition.getSerde();
        final long partId = partitionSequenceIds.getPartId() + currentIndex;
        final long sdsId = partitionSequenceIds.getSdsId() + currentIndex;
        final long serdeId = partitionSequenceIds.getSerdeId() + currentIndex;
        final String partitionName = partition.getName().getPartitionName();
        final List<String> partValues = PartitionUtil.getPartValuesFromPartName(tableQName, table, partitionName);
        final String escapedPartName = PartitionUtil.makePartName(table.getPartitionKeys(), partValues);
        partitionsValues.add(new Object[] {
            0, tableSequenceIds.getTableId(), currentTimeInEpoch, sdsId, escapedPartName, partId,
        });
        for (int i = 0; i < partValues.size(); i++) {
            partitionKeyValsValues.add(new Object[] {partId, partValues.get(i), i});
        }
        // Partition parameters
        final Map<String, String> parameters = partition.getMetadata();
        if (parameters != null) {
            parameters.forEach((key, value) -> partitionParamsValues.add(new Object[] {value, partId, key}));
        }
        partitionParamsValues.add(new Object[] {currentTimeInEpoch, partId, PARAM_LAST_DDL_TIME});
        if (storageInfo != null) {
            serdesValues.add(new Object[] {null, storageInfo.getSerializationLib(), serdeId});
            final Map<String, String> serdeInfoParameters = storageInfo.getSerdeInfoParameters();
            if (serdeInfoParameters != null) {
                serdeInfoParameters.forEach((key, value) ->
                    serdeParamsValues.add(new Object[] {value, serdeId, key}));
            }
            sdsValues.add(new Object[] {
                storageInfo.getOutputFormat(), false, tableSequenceIds.getCdId(), false, serdeId,
                storageInfo.getUri(), storageInfo.getInputFormat(), 0, sdsId,
            });
        }
        partitionNames.add(partitionName);
        currentIndex++;
    }
    try {
        jdbcTemplate.batchUpdate(SQL.SERDES_INSERT, serdesValues,
            new int[] {Types.VARCHAR, Types.VARCHAR, Types.BIGINT});
        jdbcTemplate.batchUpdate(SQL.SERDE_PARAMS_INSERT, serdeParamsValues,
            new int[] {Types.VARCHAR, Types.BIGINT, Types.VARCHAR});
        jdbcTemplate.batchUpdate(SQL.SDS_INSERT, sdsValues,
            new int[] {Types.VARCHAR, Types.BOOLEAN, Types.BIGINT, Types.BOOLEAN, Types.BIGINT,
                Types.VARCHAR, Types.VARCHAR, Types.INTEGER, Types.BIGINT});
        jdbcTemplate.batchUpdate(SQL.PARTITIONS_INSERT, partitionsValues,
            new int[] {Types.INTEGER, Types.BIGINT, Types.INTEGER, Types.BIGINT, Types.VARCHAR, Types.BIGINT});
        jdbcTemplate.batchUpdate(SQL.PARTITION_PARAMS_INSERT, partitionParamsValues,
            new int[] {Types.VARCHAR, Types.BIGINT, Types.VARCHAR});
        jdbcTemplate.batchUpdate(SQL.PARTITION_KEY_VALS_INSERT, partitionKeyValsValues,
            new int[] {Types.BIGINT, Types.VARCHAR, Types.INTEGER});
    } catch (DuplicateKeyException e) {
        throw new PartitionAlreadyExistsException(tableQName, partitionNames, e);
    } catch (Exception e) {
        throw new ConnectorException(
            String.format("Failed inserting partitions %s for table %s", partitionNames, tableQName), e);
    }
}
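Each entity (serdes, storage descriptors, partitions, parameters, key values) is accumulated as a list of Object[] rows and flushed with a single JdbcTemplate.batchUpdate call per statement, with an int[] of java.sql.Types describing the positional parameters. A self-contained sketch of that pattern, with a hypothetical statement standing in for the SQL.* constants (which are not shown above):

import java.sql.Types;
import java.util.List;
import com.google.common.collect.Lists;
import org.springframework.jdbc.core.JdbcTemplate;

public class BatchInsertSketch {
    // Hypothetical SQL; column names follow the Hive metastore schema.
    private static final String PARTITION_KEY_VALS_INSERT =
        "INSERT INTO PARTITION_KEY_VALS (PART_ID, PART_KEY_VAL, INTEGER_IDX) VALUES (?, ?, ?)";

    /** Inserts one row per partition key value, preserving key order via the index column. */
    public static void insertKeyVals(final JdbcTemplate jdbcTemplate, final long partId,
                                     final List<String> partValues) {
        final List<Object[]> rows = Lists.newArrayList();
        for (int i = 0; i < partValues.size(); i++) {
            rows.add(new Object[] {partId, partValues.get(i), i});
        }
        // The int[] declares the JDBC type of each positional parameter in the statement.
        jdbcTemplate.batchUpdate(PARTITION_KEY_VALS_INSERT, rows,
            new int[] {Types.BIGINT, Types.VARCHAR, Types.INTEGER});
    }
}

Batching this way sends one round trip per statement instead of one per row, which is the point of bypassing the Thrift metastore API for bulk partition saves.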
Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in project metacat by Netflix.
The class DirectSqlSavePartition, method _update.
@SuppressWarnings("checkstyle:methodname")
private void _update(final QualifiedName tableQName, final List<PartitionHolder> partitionHolders,
                     final long currentTimeInEpoch) {
    final List<Object[]> serdesValues = Lists.newArrayList();
    final List<Object[]> serdeParamsValues = Lists.newArrayList();
    final List<Object[]> sdsValues = Lists.newArrayList();
    final List<Object[]> partitionParamsValues = Lists.newArrayList();
    final List<String> partitionNames = Lists.newArrayList();
    for (PartitionHolder partitionHolder : partitionHolders) {
        final PartitionInfo partition = partitionHolder.getPartitionInfo();
        final StorageInfo storageInfo = partition.getSerde();
        final long partId = partitionHolder.getId();
        final long sdsId = partitionHolder.getSdId();
        final long serdeId = partitionHolder.getSerdeId();
        // Partition parameters
        final Map<String, String> parameters = partition.getMetadata();
        if (parameters != null) {
            parameters.forEach((key, value) ->
                partitionParamsValues.add(new Object[] {value, partId, key, value}));
        }
        partitionParamsValues.add(new Object[] {currentTimeInEpoch, partId, PARAM_LAST_DDL_TIME, currentTimeInEpoch});
        if (storageInfo != null) {
            serdesValues.add(new Object[] {null, storageInfo.getSerializationLib(), serdeId});
            final Map<String, String> serdeInfoParameters = storageInfo.getSerdeInfoParameters();
            if (serdeInfoParameters != null) {
                serdeInfoParameters.forEach((key, value) ->
                    serdeParamsValues.add(new Object[] {value, serdeId, key, value}));
            }
            sdsValues.add(new Object[] {
                storageInfo.getOutputFormat(), false, false, storageInfo.getUri(),
                storageInfo.getInputFormat(), sdsId,
            });
        }
        partitionNames.add(partition.getName().toString());
    }
    try {
        jdbcTemplate.batchUpdate(SQL.SERDES_UPDATE, serdesValues,
            new int[] {Types.VARCHAR, Types.VARCHAR, Types.BIGINT});
        jdbcTemplate.batchUpdate(SQL.SERDE_PARAMS_INSERT_UPDATE, serdeParamsValues,
            new int[] {Types.VARCHAR, Types.BIGINT, Types.VARCHAR, Types.VARCHAR});
        jdbcTemplate.batchUpdate(SQL.SDS_UPDATE, sdsValues,
            new int[] {Types.VARCHAR, Types.BOOLEAN, Types.BOOLEAN, Types.VARCHAR, Types.VARCHAR, Types.BIGINT});
        jdbcTemplate.batchUpdate(SQL.PARTITION_PARAMS_INSERT_UPDATE, partitionParamsValues,
            new int[] {Types.VARCHAR, Types.BIGINT, Types.VARCHAR, Types.VARCHAR});
    } catch (DuplicateKeyException e) {
        throw new PartitionAlreadyExistsException(tableQName, partitionNames, e);
    } catch (Exception e) {
        throw new ConnectorException(
            String.format("Failed updating partitions %s for table %s", partitionNames, tableQName), e);
    }
}
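Note the shape of the parameter rows here versus _insert: each row repeats the value ({value, partId, key, value}), which matches an insert-or-update statement whose trailing placeholder feeds the update branch. A plausible shape for SQL.PARTITION_PARAMS_INSERT_UPDATE, shown as an assumption since the constant itself is not part of this snippet:

// Hypothetical MySQL upsert; the fourth placeholder re-binds the value for the
// ON DUPLICATE KEY UPDATE branch, mirroring the {value, partId, key, value} rows.
private static final String PARTITION_PARAMS_INSERT_UPDATE =
    "INSERT INTO PARTITION_PARAMS (PARAM_VALUE, PART_ID, PARAM_KEY) VALUES (?, ?, ?) "
        + "ON DUPLICATE KEY UPDATE PARAM_VALUE = ?";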