Use of org.springframework.jdbc.core.ResultSetExtractor in project opennms by OpenNMS.
Class AlarmdIT, method changeFields.
@Test
public void changeFields() throws InterruptedException, SQLException {
    assertEmptyAlarmTable();
    String reductionKey = "testUpdateField";
    MockNode node1 = m_mockNetwork.getNode(1);
    // Verify we have the default alarm
    sendNodeDownEvent(reductionKey, node1);
    int severity = m_jdbcTemplate.queryForObject("select severity from alarms a where a.reductionKey = ?", new Object[] { reductionKey }, Integer.class).intValue();
    assertEquals(OnmsSeverity.MAJOR, OnmsSeverity.get(severity));
    // Store the original logmsg from the original alarm (we are about to test changing it with subsequent alarm reduction)
    String defaultLogMsg = m_jdbcTemplate.query("select logmsg from alarms", new ResultSetExtractor<String>() {
        @Override
        public String extractData(ResultSet results) throws SQLException, DataAccessException {
            results.next();
            int row = results.getRow();
            boolean isLast = results.isLast();
            boolean isFirst = results.isFirst();
            if (row != 1 && !isLast && !isFirst) {
                throw new SQLException("Row count is not = 1. There should only be one row returned from the query: \n" + results.getStatement());
            }
            return results.getString(1);
        }
    });
    assertTrue("The logmsg column should not be null", defaultLogMsg != null);
    // Duplicate the alarm but change the severity and verify the change
    sendNodeDownEventWithUpdateFieldSeverity(reductionKey, node1, OnmsSeverity.CRITICAL);
    severity = m_jdbcTemplate.queryForObject("select severity from alarms", Integer.class).intValue();
    assertEquals("Severity should now be Critical", OnmsSeverity.CRITICAL, OnmsSeverity.get(severity));
    // Duplicate the alarm but don't force the change of severity
    sendNodeDownEvent(reductionKey, node1);
    severity = m_jdbcTemplate.queryForObject("select severity from alarms", Integer.class).intValue();
    assertEquals("Severity should still be Critical", OnmsSeverity.CRITICAL, OnmsSeverity.get(severity));
    // Duplicate the alarm and change the logmsg
    sendNodeDownEventChangeLogMsg(reductionKey, node1, "new logMsg");
    String newLogMsg = m_jdbcTemplate.query("select logmsg from alarms", new ResultSetExtractor<String>() {
        @Override
        public String extractData(ResultSet results) throws SQLException, DataAccessException {
            results.next();
            return results.getString(1);
        }
    });
    assertEquals("new logMsg", newLogMsg);
    assertTrue(!newLogMsg.equals(defaultLogMsg));
    // Duplicate the alarm but force logmsg to not change (logmsg field is updated by default)
    sendNodeDownEventDontChangeLogMsg(reductionKey, node1, "newer logMsg");
    newLogMsg = m_jdbcTemplate.query("select logmsg from alarms", new ResultSetExtractor<String>() {
        @Override
        public String extractData(ResultSet results) throws SQLException, DataAccessException {
            results.next();
            return results.getString(1);
        }
    });
    assertTrue("The logMsg should not have changed.", !"newer logMsg".equals(newLogMsg));
    assertEquals("The logMsg should still be equal to the previous update.", "new logMsg", newLogMsg);
    // Duplicate the alarm with the default configuration and verify the logmsg has changed (as is the default behavior
    // for this field)
    sendNodeDownEvent(reductionKey, node1);
    newLogMsg = m_jdbcTemplate.query("select logmsg from alarms", new ResultSetExtractor<String>() {
        @Override
        public String extractData(ResultSet results) throws SQLException, DataAccessException {
            results.next();
            return results.getString(1);
        }
    });
    assertTrue("The logMsg should have changed.", !"new logMsg".equals(newLogMsg));
assertEquals("The logMsg should new be the default logMsg.", newLogMsg, defaultLogMsg);
}
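The three anonymous ResultSetExtractor<String> instances in this test all read a single logmsg value from what is expected to be a one-row result. As a minimal sketch, not part of the OpenNMS test and assuming the alarms table really holds exactly one row at that point, the same reads could be shortened with queryForObject or a reusable lambda extractor:

    // Sketch only: queryForObject enforces the "exactly one row" expectation and
    // returns the single column value directly.
    String logMsg = m_jdbcTemplate.queryForObject("select logmsg from alarms", String.class);

    // Alternatively, one reusable extractor avoids repeating the anonymous class
    // (the variable name below is illustrative):
    ResultSetExtractor<String> firstColumnOfSingleRow = results -> {
        results.next();
        return results.getString(1);
    };
    String logMsgAgain = m_jdbcTemplate.query("select logmsg from alarms", firstColumnOfSingleRow);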
Use of org.springframework.jdbc.core.ResultSetExtractor in project ANNIS by korpling.
Class AdministrationDao, method listCorpusAlias.
/**
 * Provides a list where the keys are the aliases and the values are the
 * corpus names.
 *
 * @param databaseProperties optional database connection properties file; if {@code null} the current data source is used
 * @return a multimap from alias to corpus name (empty if the query fails)
 */
public Multimap<String, String> listCorpusAlias(File databaseProperties) {
    Multimap<String, String> result = TreeMultimap.create();
    DataSource origDataSource = getDataSource().getInnerDataSource();
    try {
        if (databaseProperties != null) {
            getDataSource().setInnerDataSource(createDataSource(databaseProperties));
        }
        result = getJdbcTemplate().query("SELECT a.alias AS alias, c.name AS corpus\n" + "FROM corpus_alias AS a, corpus AS c\n" + "WHERE\n" + " a.corpus_ref = c.id", new ResultSetExtractor<Multimap<String, String>>() {
            @Override
            public Multimap<String, String> extractData(ResultSet rs) throws SQLException, DataAccessException {
                Multimap<String, String> data = TreeMultimap.create();
                while (rs.next()) {
                    // alias -> corpus name
                    data.put(rs.getString(1), rs.getString(2));
                }
                return data;
            }
        });
    } catch (IOException | URISyntaxException | DataAccessException ex) {
        if (databaseProperties == null) {
            log.error("Could not query corpus list", ex);
        } else {
            log.error("Could not query corpus list for the file " + databaseProperties.getAbsolutePath(), ex);
        }
    } finally {
        getDataSource().setInnerDataSource(origDataSource);
    }
    return result;
}
}
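A minimal caller sketch for the method above (hypothetical, not part of the ANNIS code; administrationDao and log are placeholder names): passing null keeps the current data source, and the returned Multimap can be walked entry by entry:

    // Hypothetical usage: query aliases against the currently configured data source.
    Multimap<String, String> aliases = administrationDao.listCorpusAlias(null);
    for (Map.Entry<String, String> entry : aliases.entries()) {
        log.info("alias {} -> corpus {}", entry.getKey(), entry.getValue());
    }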
Use of org.springframework.jdbc.core.ResultSetExtractor in project metacat by Netflix.
Class DirectSqlGetPartition, method getPartitionKeys.
/**
 * Gets the partition names/keys based on a filter expression for the specified table.
 *
 * @param requestContext    The Metacat request context
 * @param tableName         table handle to get partitions for
 * @param partitionsRequest The metadata for what kind of partitions to get from the table
 * @return filtered list of partition names
 */
@Transactional(readOnly = true)
public List<String> getPartitionKeys(final ConnectorRequestContext requestContext, final QualifiedName tableName, final PartitionListRequest partitionsRequest) {
    final long start = registry.clock().wallTime();
    final List<String> result;
    final List<String> partitionNames = partitionsRequest.getPartitionNames();
    final Sort sort = partitionsRequest.getSort();
    final Pageable pageable = partitionsRequest.getPageable();
    final String filterExpression = partitionsRequest.getFilter();
    if (filterExpression != null) {
        return filterPartitionsColumn(tableName.getDatabaseName(), tableName.getTableName(), partitionNames, PARTITION_NAME, filterExpression, sort, pageable, partitionsRequest.getIncludeAuditOnly());
    } else {
        final ResultSetExtractor<List<String>> handler = rs -> {
            final List<String> names = Lists.newArrayList();
            while (rs.next()) {
                names.add(rs.getString("name"));
            }
            return names;
        };
        result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(), null, partitionNames, SQL.SQL_GET_PARTITIONS_WITH_KEY, handler, sort, pageable, partitionsRequest.getIncludeAuditOnly());
    }
    this.fastServiceMetric.recordTimer(HiveMetrics.TagGetPartitionKeys.getMetricName(), registry.clock().wallTime() - start);
    return result;
}
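Because ResultSetExtractor is a functional interface, the handler above is just a lambda that receives the whole ResultSet and returns the finished list. For a simple one-column result like this, Spring's queryForList is a shorter equivalent; a self-contained sketch against a plain JdbcTemplate (the SQL, jdbcTemplate, and tableId are illustrative, not metacat's SQL.SQL_GET_PARTITIONS_WITH_KEY) shows the difference:

    // Extractor form: one callback owns the whole ResultSet and builds the result itself.
    final ResultSetExtractor<List<String>> extractor = rs -> {
        final List<String> names = Lists.newArrayList();
        while (rs.next()) {
            names.add(rs.getString("name"));
        }
        return names;
    };
    List<String> names = jdbcTemplate.query("SELECT name FROM PARTITIONS WHERE TBL_ID = ?", extractor, tableId);

    // queryForList form: Spring iterates the rows and maps the single column for you.
    List<String> sameNames = jdbcTemplate.queryForList("SELECT name FROM PARTITIONS WHERE TBL_ID = ?", String.class, tableId);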
Use of org.springframework.jdbc.core.ResultSetExtractor in project metacat by Netflix.
Class DirectSqlGetPartition, method filterPartitionsColumn.
/**
 * Query partitions using filters on the name or uri column.
 */
private List<String> filterPartitionsColumn(final String databaseName, final String tableName, final List<String> partitionNames, final String columnName, final String filterExpression, final Sort sort, final Pageable pageable, final boolean forceDisableAudit) {
    final FilterPartition filter = config.escapePartitionNameOnFilter() ? new HiveFilterPartition() : new FilterPartition();
    // batch exists
    final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
    final boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
    ResultSetExtractor<List<String>> handler = rs -> {
        final List<String> columns = Lists.newArrayList();
        while (rs.next()) {
            final String name = rs.getString(PARTITION_NAME);
            final String uri = rs.getString(PARTITION_URI);
            final long createdDate = rs.getLong(FIELD_DATE_CREATED);
            Map<String, String> values = null;
            if (hasDateCreated) {
                values = Maps.newHashMap();
                values.put(FIELD_DATE_CREATED, createdDate + "");
            }
            if (Strings.isNullOrEmpty(filterExpression) || filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
                columns.add(rs.getString(columnName));
            }
        }
        return columns;
    };
    return getHandlerResults(databaseName, tableName, filterExpression, partitionNames, SQL.SQL_GET_PARTITIONS_WITH_KEY_URI, handler, sort, pageable, forceDisableAudit);
}
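The collection step here is the same single-column pattern used in getPartitionKeys, just with a configurable column name plus an extra per-row filter check. A hedged sketch of factoring the plain collection part into a reusable factory (the helper name is illustrative and not part of DirectSqlGetPartition):

    // Hypothetical helper: build an extractor that collects one named column into a list.
    private static ResultSetExtractor<List<String>> columnCollector(final String columnName) {
        return rs -> {
            final List<String> values = Lists.newArrayList();
            while (rs.next()) {
                values.add(rs.getString(columnName));
            }
            return values;
        };
    }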
Use of org.springframework.jdbc.core.ResultSetExtractor in project metacat by Netflix.
Class DirectSqlGetPartition, method getPartitions.
private List<PartitionHolder> getPartitions(final String databaseName, final String tableName, @Nullable final List<String> partitionIds, @Nullable final String filterExpression, @Nullable final Sort sort, @Nullable final Pageable pageable, final boolean includePartitionDetails, final boolean forceDisableAudit) {
    final FilterPartition filter = config.escapePartitionNameOnFilter() ? new HiveFilterPartition() : new FilterPartition();
    // batch exists
    final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
    final boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
    // Handler for reading the result set
    final ResultSetExtractor<List<PartitionHolder>> handler = rs -> {
        final List<PartitionHolder> result = Lists.newArrayList();
        final QualifiedName tableQName = QualifiedName.ofTable(catalogName, databaseName, tableName);
        int noOfRows = 0;
        while (rs.next()) {
            noOfRows++;
            final String name = rs.getString("name");
            final String uri = rs.getString("uri");
            final long createdDate = rs.getLong(FIELD_DATE_CREATED);
            Map<String, String> values = null;
            if (hasDateCreated) {
                values = Maps.newHashMap();
                values.put(FIELD_DATE_CREATED, createdDate + "");
            }
            if (Strings.isNullOrEmpty(filterExpression) || filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
                final Long id = rs.getLong("id");
                final Long sdId = rs.getLong("sd_id");
                final Long serdeId = rs.getLong("serde_id");
                final String inputFormat = rs.getString("input_format");
                final String outputFormat = rs.getString("output_format");
                final String serializationLib = rs.getString("slib");
                final StorageInfo storageInfo = new StorageInfo();
                storageInfo.setUri(uri);
                storageInfo.setInputFormat(inputFormat);
                storageInfo.setOutputFormat(outputFormat);
                storageInfo.setSerializationLib(serializationLib);
                final AuditInfo auditInfo = new AuditInfo();
                auditInfo.setCreatedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                auditInfo.setLastModifiedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                result.add(new PartitionHolder(id, sdId, serdeId, PartitionInfo.builder().name(QualifiedName.ofPartition(catalogName, databaseName, tableName, name)).auditInfo(auditInfo).serde(storageInfo).build()));
            }
            // Fail if the number of partitions exceeds the threshold limit.
            if (result.size() > config.getMaxPartitionsThreshold()) {
                registry.counter(registry.createId(HiveMetrics.CounterHiveGetPartitionsExceedThresholdFailure.getMetricName()).withTags(tableQName.parts())).increment();
                final String message = String.format("Number of partitions queried for table %s exceeded the threshold %d", tableQName, config.getMaxPartitionsThreshold());
                log.warn(message);
                throw new IllegalArgumentException(message);
            }
        }
        registry.gauge(registry.createId(HiveMetrics.GaugePreExpressionFilterGetPartitionsCount.getMetricName()).withTags(tableQName.parts())).set(noOfRows);
        return result;
    };
    final List<PartitionHolder> partitions = this.getHandlerResults(databaseName, tableName, filterExpression, partitionIds, SQL.SQL_GET_PARTITIONS, handler, sort, pageable, forceDisableAudit);
    if (includePartitionDetails && !partitions.isEmpty()) {
        final List<Long> partIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> sdIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> serdeIds = Lists.newArrayListWithCapacity(partitions.size());
        for (PartitionHolder partitionHolder : partitions) {
            partIds.add(partitionHolder.getId());
            sdIds.add(partitionHolder.getSdId());
            serdeIds.add(partitionHolder.getSerdeId());
        }
        final List<ListenableFuture<Void>> futures = Lists.newArrayList();
        final Map<Long, Map<String, String>> partitionParams = Maps.newHashMap();
        futures.add(threadServiceManager.getExecutor().submit(() -> populateParameters(partIds, SQL.SQL_GET_PARTITION_PARAMS, "part_id", partitionParams)));
        final Map<Long, Map<String, String>> sdParams = Maps.newHashMap();
        if (!sdIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(() -> populateParameters(sdIds, SQL.SQL_GET_SD_PARAMS, "sd_id", sdParams)));
        }
        final Map<Long, Map<String, String>> serdeParams = Maps.newHashMap();
        if (!serdeIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(() -> populateParameters(serdeIds, SQL.SQL_GET_SERDE_PARAMS, "serde_id", serdeParams)));
        }
        ListenableFuture<List<Void>> future = null;
        try {
            future = Futures.allAsList(futures);
            final int getPartitionsDetailsTimeout = Integer.parseInt(configuration.getOrDefault(HiveConfigConstants.GET_PARTITION_DETAILS_TIMEOUT, "120"));
            future.get(getPartitionsDetailsTimeout, TimeUnit.SECONDS);
        } catch (InterruptedException | ExecutionException | TimeoutException e) {
            try {
                if (future != null) {
                    future.cancel(true);
                }
            } catch (Exception ignored) {
                log.warn("Failed cancelling the task that gets the partition details.");
            }
            Throwables.propagate(e);
        }
        for (PartitionHolder partitionHolder : partitions) {
            partitionHolder.getPartitionInfo().setMetadata(partitionParams.get(partitionHolder.getId()));
            partitionHolder.getPartitionInfo().getSerde().setParameters(sdParams.get(partitionHolder.getSdId()));
            partitionHolder.getPartitionInfo().getSerde().setSerdeInfoParameters(serdeParams.get(partitionHolder.getSerdeId()));
        }
    }
    return partitions;
}
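The threshold check inside the handler above is the notable part: because a ResultSetExtractor sees rows as they stream in, it can abort the read by throwing as soon as the limit is crossed rather than materializing the whole result first. A stripped-down sketch of that idea (the helper name, column, and exception type are illustrative, not the metacat code):

    // Hypothetical sketch: stop reading once more than maxRows rows have been accumulated.
    static ResultSetExtractor<List<String>> boundedNames(final int maxRows) {
        return rs -> {
            final List<String> names = Lists.newArrayList();
            while (rs.next()) {
                names.add(rs.getString("name"));
                if (names.size() > maxRows) {
                    throw new IllegalArgumentException("Result exceeded the allowed maximum of " + maxRows + " rows");
                }
            }
            return names;
        };
    }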