Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in the Netflix metacat project: class HiveConnectorTableService, method listNames.
/**
 * {@inheritDoc}.
 */
@Override
public List<QualifiedName> listNames(final ConnectorRequestContext requestContext, final QualifiedName name, @Nullable final QualifiedName prefix, @Nullable final Sort sort, @Nullable final Pageable pageable) {
    try {
        // When the prefix pins down a table name, use it as a cheap startsWith filter first.
        final String tableFilter = (prefix != null && prefix.isTableDefinition()) ? prefix.getTableName() : null;
        final List<QualifiedName> result = Lists.newArrayList();
        for (final String tableName : metacatHiveClient.getAllTables(name.getDatabaseName())) {
            if (tableFilter != null && !tableName.startsWith(tableFilter)) {
                continue;
            }
            final QualifiedName qualifiedName = QualifiedName.ofTable(name.getCatalogName(), name.getDatabaseName(), tableName);
            // A non-null prefix must also match the fully qualified name.
            if (prefix == null || qualifiedName.toString().startsWith(prefix.toString())) {
                result.add(qualifiedName);
            }
        }
        // Sorting is supported on the qualified name only.
        if (sort != null) {
            ConnectorUtils.sort(result, sort, Comparator.comparing(QualifiedName::toString));
        }
        return ConnectorUtils.paginate(result, pageable);
    } catch (MetaException exception) {
        throw new InvalidMetaException(name, exception);
    } catch (NoSuchObjectException exception) {
        throw new DatabaseNotFoundException(name, exception);
    } catch (TException exception) {
        throw new ConnectorException(String.format("Failed listNames hive table %s", name), exception);
    }
}
Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in the Netflix metacat project: class DirectSqlGetPartition, method getPartitionCount.
/**
 * Number of partitions for the given table.
 *
 * @param requestContext request context
 * @param tableName tableName
 * @return Number of partitions
 */
@Transactional(readOnly = true)
public int getPartitionCount(final ConnectorRequestContext requestContext, final QualifiedName tableName) {
    final long start = registry.clock().wallTime();
    // Extracts the single "count" column from the result set; yields 0 when there are no rows.
    final ResultSetExtractor<Integer> countExtractor = rs -> {
        int total = 0;
        while (rs.next()) {
            total = rs.getInt("count");
        }
        return total;
    };
    try {
        final Optional<QualifiedName> sourceTable = getSourceTableName(tableName.getDatabaseName(), tableName.getTableName(), false);
        if (sourceTable.isPresent()) {
            // Audit table: count partitions against the source table as well.
            final QualifiedName source = sourceTable.get();
            return jdbcTemplate.query(SQL.SQL_GET_AUDIT_TABLE_PARTITION_COUNT,
                new String[] { tableName.getDatabaseName(), tableName.getTableName(), source.getDatabaseName(), source.getTableName() },
                new int[] { Types.VARCHAR, Types.VARCHAR, Types.VARCHAR, Types.VARCHAR }, countExtractor);
        }
        return jdbcTemplate.query(SQL.SQL_GET_PARTITION_COUNT,
            new String[] { tableName.getDatabaseName(), tableName.getTableName() },
            new int[] { Types.VARCHAR, Types.VARCHAR }, countExtractor);
    } catch (Exception e) {
        throw new ConnectorException("Failed getting the partition count", e);
    } finally {
        // Record timing regardless of success or failure.
        this.fastServiceMetric.recordTimer(HiveMetrics.TagGetPartitionCount.getMetricName(), registry.clock().wallTime() - start);
    }
}
Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in the Netflix metacat project: class DirectSqlTable, method delete.
/**
 * Deletes all the table related information from the store.
 * @param tableName table name
 */
public void delete(final QualifiedName tableName) {
    try {
        // Resolve the metastore row ids (table, sds, serde, cd) once up front.
        final TableSequenceIds ids = getSequenceIds(tableName);
        // Partition rows must go first; they reference the table row.
        directSqlSavePartition.delete(tableName);
        // Null out the SDS -> CD and SDS -> SERDE references before deleting
        // the referenced rows, to avoid foreign-key violations.
        jdbcTemplate.update(SQL.UPDATE_SDS_CD, new SqlParameterValue(Types.BIGINT, null), new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
        jdbcTemplate.update(SQL.UPDATE_SDS_SERDE, new SqlParameterValue(Types.BIGINT, null), new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
        // The next three tables may not exist in every Hive metastore schema
        // version, so a failure on each is tolerated individually.
        try {
            jdbcTemplate.update(SQL.DELETE_COLUMNS_OLD, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
        } catch (DataAccessException ignored) {
            log.debug("Ignore. Probably table COLUMNS_OLD does not exist.");
        }
        try {
            jdbcTemplate.update(SQL.DELETE_TBL_PRIVS, new SqlParameterValue(Types.BIGINT, ids.getTableId()));
        } catch (DataAccessException ignored) {
            log.debug("Ignore. Probably table TBL_PRIVS does not exist.");
        }
        try {
            jdbcTemplate.update(SQL.DELETE_TBL_COL_PRIVS, new SqlParameterValue(Types.BIGINT, ids.getTableId()));
        } catch (DataAccessException ignored) {
            log.debug("Ignore. Probably table TBL_COL_PRIVS does not exist.");
        }
        // Remaining deletes run in dependency order: column/key metadata,
        // then storage-descriptor satellites, then serde, sds, and finally
        // the table row itself. Reordering these can break FK constraints.
        jdbcTemplate.update(SQL.DELETE_COLUMNS_V2, new SqlParameterValue(Types.BIGINT, ids.getCdId()));
        jdbcTemplate.update(SQL.DELETE_CDS, new SqlParameterValue(Types.BIGINT, ids.getCdId()));
        jdbcTemplate.update(SQL.DELETE_PARTITION_KEYS, new SqlParameterValue(Types.BIGINT, ids.getTableId()));
        jdbcTemplate.update(SQL.DELETE_TABLE_PARAMS, new SqlParameterValue(Types.BIGINT, ids.getTableId()));
        jdbcTemplate.update(SQL.DELETE_TAB_COL_STATS, new SqlParameterValue(Types.BIGINT, ids.getTableId()));
        // Detach the table from its storage descriptor before deleting it.
        jdbcTemplate.update(SQL.UPDATE_TABLE_SD, new SqlParameterValue(Types.BIGINT, null), new SqlParameterValue(Types.BIGINT, ids.getTableId()));
        jdbcTemplate.update(SQL.DELETE_SKEWED_COL_NAMES, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
        jdbcTemplate.update(SQL.DELETE_BUCKETING_COLS, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
        jdbcTemplate.update(SQL.DELETE_SORT_COLS, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
        jdbcTemplate.update(SQL.DELETE_SD_PARAMS, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
        jdbcTemplate.update(SQL.DELETE_SKEWED_COL_VALUE_LOC_MAP, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
        jdbcTemplate.update(SQL.DELETE_SKEWED_VALUES, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
        jdbcTemplate.update(SQL.DELETE_SERDE_PARAMS, new SqlParameterValue(Types.BIGINT, ids.getSerdeId()));
        jdbcTemplate.update(SQL.DELETE_SERDES, new SqlParameterValue(Types.BIGINT, ids.getSerdeId()));
        jdbcTemplate.update(SQL.DELETE_SDS, new SqlParameterValue(Types.BIGINT, ids.getSdsId()));
        jdbcTemplate.update(SQL.DELETE_TBLS, new SqlParameterValue(Types.BIGINT, ids.getTableId()));
    } catch (DataAccessException e) {
        throw new ConnectorException(String.format("Failed delete hive table %s", tableName), e);
    }
}
Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in the Netflix metacat project: class SequenceGeneration, method newPartitionSequenceIdByName.
/**
 * Returns the current sequence ids and increments the sequence ids by the given <code>size</code>.
 *
 * @param size number of records getting inserted
 * @param sequenceParamName the sequence Parameter Name
 * @return current sequence ids
 * @throws ConnectorException if reading or updating the sequence record fails
 */
@Transactional(propagation = Propagation.REQUIRES_NEW)
public Long newPartitionSequenceIdByName(final int size, final String sequenceParamName) {
    Long result = null;
    try {
        // Get current sequence number
        result = jdbcTemplate.queryForObject(SQL.SEQUENCE_NEXT_VAL_BYNAME, new Object[] { sequenceParamName }, Long.class);
    } catch (EmptyResultDataAccessException e) {
        // No row yet for this sequence name; handled below by seeding at 1.
        log.warn("Failed getting the sequence ids for partition", e);
    } catch (Exception e) {
        // Preserve the underlying cause so the failure is diagnosable upstream
        // (previously the cause was dropped, losing the original stack trace).
        throw new ConnectorException("Failed retrieving the sequence numbers.", e);
    }
    try {
        if (result == null) {
            // init to 1L in case there's no records
            result = 1L;
            jdbcTemplate.update(SQL.SEQUENCE_INSERT_VAL, result + size, sequenceParamName);
        } else {
            jdbcTemplate.update(SQL.SEQUENCE_UPDATE_VAL, result + size, sequenceParamName);
        }
        // Return the pre-increment value; the stored value is now result + size.
        return result;
    } catch (Exception e) {
        throw new ConnectorException("Failed updating the sequence ids for partition", e);
    }
}
Use of com.netflix.metacat.common.server.connectors.exception.ConnectorException in the Netflix metacat project: class EmbeddedHiveClient, method callWrap.
/**
 * Invokes the given callable, translating and re-throwing failures as Thrift
 * exceptions, and records per-request timing metrics tagged by request name.
 */
private <R> R callWrap(final String requestName, final Callable<R> supplier) throws TException {
    final long start = registry.clock().wallTime();
    final Map<String, String> tags = new HashMap<>();
    tags.put("request", requestName);
    try {
        return supplier.call();
    } catch (MetaException e) {
        handleSqlException(e);
        final Throwable cause = e.getCause();
        // Datastore-level failures surface as a connector exception.
        if (cause instanceof NucleusDataStoreException) {
            throw new ConnectorException(e.getMessage(), cause);
        }
        throw e;
    } catch (TException e) {
        handleSqlException(e);
        throw e;
    } catch (Exception e) {
        // Any other failure is wrapped in a Thrift exception for the caller.
        throw new TException(e.getMessage(), e.getCause());
    } finally {
        final long duration = registry.clock().wallTime() - start;
        log.debug("### Time taken to complete {} is {} ms", requestName, duration);
        this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
    }
}
Aggregations