Use of org.apache.commons.dbutils.ResultSetHandler in project metacat by Netflix.
The class HiveConnectorFastPartitionService, method getPartitionCount:
/**
 * Number of partitions for the given table.
 *
 * @param requestContext request context
 * @param tableName tableName
 * @return Number of partitions
 */
@Override
public int getPartitionCount(@Nonnull @NonNull final ConnectorContext requestContext,
                             @Nonnull @NonNull final QualifiedName tableName) {
    final long start = registry.clock().monotonicTime();
    final Map<String, String> tags = new HashMap<String, String>();
    tags.put("request", HiveMetrics.getPartitionCount.name());
    final Integer result;
    final DataSource dataSource = DataSourceManager.get().get(catalogName);
    try (Connection conn = dataSource.getConnection()) {
        // Handler for reading the result set: the query returns a single row
        // whose "count" column holds the partition count.
        final ResultSetHandler<Integer> handler = rs -> {
            int count = 0;
            while (rs.next()) {
                count = rs.getInt("count");
            }
            return count;
        };
        result = new QueryRunner().query(conn, SQL_GET_PARTITION_COUNT, handler,
            tableName.getDatabaseName(), tableName.getTableName());
    } catch (SQLException e) {
        throw new ConnectorException("getPartitionCount", e);
    } finally {
        final long duration = registry.clock().monotonicTime() - start;
        log.debug("### Time taken to complete getPartitionCount is {} ms", duration);
        this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
    }
    return result;
}
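The pattern above reduces to a small amount of standalone DbUtils code: a ResultSetHandler maps the ResultSet to a value, and QueryRunner owns statement creation, parameter binding, and resource cleanup. A minimal, self-contained sketch of the same count query; the SQL is a placeholder standing in for metacat's SQL_GET_PARTITION_COUNT, which is defined elsewhere in the class:

import java.sql.Connection;
import java.sql.SQLException;

import org.apache.commons.dbutils.QueryRunner;
import org.apache.commons.dbutils.ResultSetHandler;

public class PartitionCountExample {
    // Placeholder query; the real SQL_GET_PARTITION_COUNT lives elsewhere in metacat.
    private static final String SQL =
        "SELECT count(*) AS count FROM PARTITIONS p"
        + " JOIN TBLS t ON p.TBL_ID = t.TBL_ID"
        + " JOIN DBS d ON t.DB_ID = d.DB_ID"
        + " WHERE d.NAME = ? AND t.TBL_NAME = ?";

    public static int countPartitions(final Connection conn,
                                      final String databaseName,
                                      final String tableName) throws SQLException {
        // The handler only maps the ResultSet to a value; QueryRunner handles
        // the PreparedStatement lifecycle and binds the two parameters.
        final ResultSetHandler<Integer> handler = rs -> rs.next() ? rs.getInt("count") : 0;
        return new QueryRunner().query(conn, SQL, handler, databaseName, tableName);
    }
}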
Use of org.apache.commons.dbutils.ResultSetHandler in project metacat by Netflix.
The class HiveConnectorFastPartitionService, method getpartitions:
private List<PartitionInfo> getpartitions(@Nonnull @NonNull final String databaseName,
                                          @Nonnull @NonNull final String tableName,
                                          @Nullable final List<String> partitionIds,
                                          final String filterExpression,
                                          final Sort sort,
                                          final Pageable pageable,
                                          final boolean includePartitionDetails) {
    final FilterPartition filter = new FilterPartition();
    // Check whether the filter references the batch id and date-created fields
    final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
    final boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
    // Handler for reading the result set
    final ResultSetHandler<List<PartitionDetail>> handler = rs -> {
        final List<PartitionDetail> result = Lists.newArrayList();
        while (rs.next()) {
            final String name = rs.getString("name");
            final String uri = rs.getString("uri");
            final long createdDate = rs.getLong(FIELD_DATE_CREATED);
            Map<String, String> values = null;
            if (hasDateCreated) {
                values = Maps.newHashMap();
                values.put(FIELD_DATE_CREATED, createdDate + "");
            }
            if (Strings.isNullOrEmpty(filterExpression)
                || filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
                final Long id = rs.getLong("id");
                final Long sdId = rs.getLong("sd_id");
                final Long serdeId = rs.getLong("serde_id");
                final String inputFormat = rs.getString("input_format");
                final String outputFormat = rs.getString("output_format");
                final String serializationLib = rs.getString("slib");
                final StorageInfo storageInfo = new StorageInfo();
                storageInfo.setUri(uri);
                storageInfo.setInputFormat(inputFormat);
                storageInfo.setOutputFormat(outputFormat);
                storageInfo.setSerializationLib(serializationLib);
                final AuditInfo auditInfo = new AuditInfo();
                auditInfo.setCreatedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                auditInfo.setLastModifiedDate(Date.from(Instant.ofEpochSecond(createdDate)));
                result.add(new PartitionDetail(id, sdId, serdeId,
                    PartitionInfo.builder()
                        .name(QualifiedName.ofPartition(catalogName, databaseName, tableName, name))
                        .auditInfo(auditInfo)
                        .serde(storageInfo)
                        .build()));
            }
        }
        return result;
    };
    final List<PartitionInfo> partitionInfos = new ArrayList<>();
    final List<PartitionDetail> partitions = getHandlerResults(databaseName, tableName,
        filterExpression, partitionIds, SQL_GET_PARTITIONS, handler, sort, pageable);
    if (includePartitionDetails && !partitions.isEmpty()) {
        final List<Long> partIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> sdIds = Lists.newArrayListWithCapacity(partitions.size());
        final List<Long> serdeIds = Lists.newArrayListWithCapacity(partitions.size());
        for (PartitionDetail partitionDetail : partitions) {
            partIds.add(partitionDetail.getId());
            sdIds.add(partitionDetail.getSdId());
            serdeIds.add(partitionDetail.getSerdeId());
        }
        // Populate the three parameter maps in parallel, then block until all finish
        final List<ListenableFuture<Void>> futures = Lists.newArrayList();
        final Map<Long, Map<String, String>> partitionParams = Maps.newHashMap();
        futures.add(threadServiceManager.getExecutor().submit(
            () -> populateParameters(partIds, SQL_GET_PARTITION_PARAMS, "part_id", partitionParams)));
        final Map<Long, Map<String, String>> sdParams = Maps.newHashMap();
        if (!sdIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(
                () -> populateParameters(sdIds, SQL_GET_SD_PARAMS, "sd_id", sdParams)));
        }
        final Map<Long, Map<String, String>> serdeParams = Maps.newHashMap();
        if (!serdeIds.isEmpty()) {
            futures.add(threadServiceManager.getExecutor().submit(
                () -> populateParameters(serdeIds, SQL_GET_SERDE_PARAMS, "serde_id", serdeParams)));
        }
        try {
            Futures.transform(Futures.successfulAsList(futures), Functions.constant(null)).get(1, TimeUnit.HOURS);
        } catch (Exception e) {
            Throwables.propagate(e);
        }
        for (PartitionDetail partitionDetail : partitions) {
            partitionDetail.getPartitionInfo().setMetadata(partitionParams.get(partitionDetail.getId()));
            partitionDetail.getPartitionInfo().getSerde().setParameters(sdParams.get(partitionDetail.getSdId()));
            partitionDetail.getPartitionInfo().getSerde().setSerdeInfoParameters(serdeParams.get(partitionDetail.getSerdeId()));
        }
    }
    for (PartitionDetail partitionDetail : partitions) {
        partitionInfos.add(partitionDetail.getPartitionInfo());
    }
    return partitionInfos;
}
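The includePartitionDetails branch fans the three parameter lookups out to an executor and blocks on the combined future. A minimal sketch of that Guava pattern in isolation, with stubbed task bodies standing in for metacat's populateParameters calls; note that successfulAsList succeeds even when individual tasks fail, which is why the original can simply transform the combined result away:

import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import com.google.common.collect.Lists;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class ParallelPopulateExample {
    public static void main(final String[] args) throws Exception {
        final ListeningExecutorService executor =
            MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(3));
        final List<ListenableFuture<Void>> futures = Lists.newArrayList();
        // Each task would fill one of the parameter maps; the bodies are stubs here.
        futures.add(executor.submit(() -> {
            // would populate partition parameters
            return null;
        }));
        futures.add(executor.submit(() -> {
            // would populate storage-descriptor parameters
            return null;
        }));
        futures.add(executor.submit(() -> {
            // would populate serde parameters
            return null;
        }));
        // The combined future completes once every task has finished (or failed);
        // failed tasks contribute null entries rather than failing the whole list.
        Futures.successfulAsList(futures).get(1, TimeUnit.HOURS);
        executor.shutdown();
    }
}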
Use of org.apache.commons.dbutils.ResultSetHandler in project jSqlBox by drinkjava2.
The class ImprovedQueryRunner, method batchFlush:
// === Batch execute methods ===
/**
 * Force flush cached SQLs.
 */
public void batchFlush() throws SQLException {
    List<Object[]> sqlCacheList = sqlBatchCache.get();
    if (sqlCacheList.isEmpty())
        return;
    // first row
    Object[] f = sqlCacheList.get(0);
    if (f.length != 6)
        throw new DbProRuntimeException("Unexpected batch cached SQL format");
    int paramLength = 0;
    if ("i1".equals(f[0]) || "i3".equals(f[0]) || "u1".equals(f[0]) || "u4".equals(f[0]))
        paramLength = 0;
    else if ("u2".equals(f[0]) || "u5".equals(f[0]))
        paramLength = 1;
    else
        paramLength = ((Object[]) sqlCacheList.get(0)[5]).length;
    Object[][] allParams = new Object[sqlCacheList.size()][paramLength];
    int i = 0;
    for (Object[] c : sqlCacheList) {
        // cached parameters
        Object param = c[2];
        Object[] params = (Object[]) c[5];
        if ("i1".equals(f[0]) || "i3".equals(f[0]) || "u1".equals(f[0]) || "u4".equals(f[0]))
            allParams[i] = new Object[0];
        else if ("u2".equals(f[0]) || "u5".equals(f[0]))
            allParams[i] = new Object[] { param };
        else
            allParams[i] = params;
        i++;
    }
    String sql = (String) f[3];
    Connection conn = (Connection) f[4];
    ResultSetHandler rsh = (ResultSetHandler) f[1];
    if (this.getAllowShowSQL()) {
        logger.info("Batch execute " + sqlCacheList.size() + " SQLs");
        logger.info(formatSqlForLoggerOutput(sql));
        logger.info("First row " + formatParametersForLoggerOutput(allParams[0]));
        logger.info("Last row " + formatParametersForLoggerOutput(allParams[allParams.length - 1]));
    }
    if ("e1".equals(f[0]) || "i1".equals(f[0]) || "u1".equals(f[0]) || "u2".equals(f[0]) || "u3".equals(f[0]))
        super.batch(conn, sql, allParams);
    else if ("e3".equals(f[0]) || "i3".equals(f[0]) || "u4".equals(f[0]) || "u5".equals(f[0]) || "u6".equals(f[0]))
        super.batch(sql, allParams);
    else if ("e2".equals(f[0]) || "i2".equals(f[0]))
        super.insertBatch(conn, sql, rsh, allParams);
    else if ("e4".equals(f[0]) || "i4".equals(f[0]))
        super.insertBatch(sql, rsh, allParams);
    else
        throw new DbProRuntimeException("Unknown batch SQL operation type '" + f[0] + "'");
    sqlBatchCache.get().clear();
}
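batchFlush() ultimately hands the accumulated parameter rows to DbUtils itself via QueryRunner.batch(...) or insertBatch(...). For reference, a minimal standalone sketch of QueryRunner.batch with a placeholder table; in the method above, the SQL and parameter rows instead come out of the cache entries (f[3] and c[5]):

import java.sql.Connection;
import java.sql.SQLException;

import org.apache.commons.dbutils.QueryRunner;

public class BatchExample {
    // Placeholder table and columns; executes the statement once per parameter row
    // in a single JDBC batch, returning the per-row update counts.
    public static int[] insertUsers(final Connection conn) throws SQLException {
        final Object[][] allParams = {
            { "tom", 1 },
            { "sam", 2 },
        };
        return new QueryRunner().batch(conn,
            "INSERT INTO users (name, id) VALUES (?, ?)", allParams);
    }
}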
Use of org.apache.commons.dbutils.ResultSetHandler in project metacat by Netflix.
The class HiveConnectorFastPartitionService, method getPartitionKeys:
/**
 * {@inheritDoc}.
 */
@Override
public List<String> getPartitionKeys(@Nonnull @NonNull final ConnectorContext requestContext,
                                     @Nonnull @NonNull final QualifiedName tableName,
                                     @Nonnull @NonNull final PartitionListRequest partitionsRequest) {
    final long start = registry.clock().monotonicTime();
    final Map<String, String> tags = new HashMap<String, String>();
    tags.put("request", "getPartitionKeys");
    final List<String> result;
    final List<String> partitionNames = partitionsRequest.getPartitionNames();
    final Sort sort = partitionsRequest.getSort();
    final Pageable pageable = partitionsRequest.getPageable();
    final String filterExpression = partitionsRequest.getFilter();
    if (filterExpression != null) {
        final FilterPartition filter = new FilterPartition();
        // Check whether the filter references the batch id and date-created fields
        final boolean isBatched = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_BATCHID);
        final boolean hasDateCreated = !Strings.isNullOrEmpty(filterExpression) && filterExpression.contains(FIELD_DATE_CREATED);
        // Handler for reading the result set: keep only names matching the filter
        final ResultSetHandler<List<String>> handler = rs -> {
            final List<String> names = Lists.newArrayList();
            while (rs.next()) {
                final String name = rs.getString("name");
                final String uri = rs.getString("uri");
                final long createdDate = rs.getLong(FIELD_DATE_CREATED);
                Map<String, String> values = null;
                if (hasDateCreated) {
                    values = Maps.newHashMap();
                    values.put(FIELD_DATE_CREATED, createdDate + "");
                }
                if (Strings.isNullOrEmpty(filterExpression)
                    || filter.evaluatePartitionExpression(filterExpression, name, uri, isBatched, values)) {
                    names.add(name);
                }
            }
            return names;
        };
        result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(),
            filterExpression, partitionNames, SQL_GET_PARTITIONS_WITH_KEY_URI, handler, sort, pageable);
    } else {
        // Handler for reading the result set: no filter, so collect every name
        final ResultSetHandler<List<String>> handler = rs -> {
            final List<String> names = Lists.newArrayList();
            while (rs.next()) {
                names.add(rs.getString("name"));
            }
            return names;
        };
        result = getHandlerResults(tableName.getDatabaseName(), tableName.getTableName(),
            null, partitionNames, SQL_GET_PARTITIONS_WITH_KEY, handler, sort, pageable);
    }
    final long duration = registry.clock().monotonicTime() - start;
    log.debug("### Time taken to complete getPartitionKeys is {} ms", duration);
    this.registry.timer(requestTimerId.withTags(tags)).record(duration, TimeUnit.MILLISECONDS);
    return result;
}
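A side note on the else branch: collecting a single column into a list is common enough that DbUtils ships a ready-made handler for it. Assuming the query exposes the partition name as a column called name, the manual while-loop above is equivalent to:

import java.util.List;

import org.apache.commons.dbutils.ResultSetHandler;
import org.apache.commons.dbutils.handlers.ColumnListHandler;

public class NameColumnHandlerExample {
    // Collects the "name" column of every row into a List<String>,
    // matching the hand-written handler in the else branch above.
    static final ResultSetHandler<List<String>> HANDLER = new ColumnListHandler<String>("name");
}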
Use of org.apache.commons.dbutils.ResultSetHandler in project metacat by Netflix.
The class HiveConnectorFastPartitionService, method getparameters:
private Map<Long, Map<String, String>> getparameters(final List<Long> ids, final String sql, final String idName) {
    // Get data source
    final DataSource dataSource = DataSourceManager.get().get(catalogName);
    // Create the sql
    final StringBuilder queryBuilder = new StringBuilder(sql);
    if (!ids.isEmpty()) {
        queryBuilder.append(" and ").append(idName).append(" in ('")
            .append(Joiner.on("','").skipNulls().join(ids)).append("')");
    }
    final ResultSetHandler<Map<Long, Map<String, String>>> handler = rs -> {
        final Map<Long, Map<String, String>> result = Maps.newHashMap();
        while (rs.next()) {
            final Long id = rs.getLong(idName);
            final String key = rs.getString("param_key");
            final String value = rs.getString("param_value");
            Map<String, String> parameters = result.get(id);
            if (parameters == null) {
                parameters = Maps.newHashMap();
                result.put(id, parameters);
            }
            parameters.put(key, value);
        }
        return result;
    };
    try (Connection conn = dataSource.getConnection()) {
        return new QueryRunner().query(conn, queryBuilder.toString(), handler);
    } catch (SQLException e) {
        Throwables.propagate(e);
    }
    return Maps.newHashMap();
}
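On Java 8 and later, the get/null-check/put sequence inside the handler can be collapsed with Map.computeIfAbsent. A behavior-equivalent sketch of that grouping step, extracted into a hypothetical helper:

import java.util.Map;

import com.google.common.collect.Maps;

public class GroupByIdExample {
    // Same grouping as the handler above: create the per-id map on first
    // sight of an id, then record the key/value pair under it.
    static void addParam(final Map<Long, Map<String, String>> result,
                         final Long id, final String key, final String value) {
        result.computeIfAbsent(id, k -> Maps.newHashMap()).put(key, value);
    }
}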