Use of org.skife.jdbi.v2.StatementContext in project druid by druid-io.
Class SQLMetadataSegmentManager, method enableDatasource:
@Override
public boolean enableDatasource(final String ds)
{
  try {
    final IDBI dbi = connector.getDBI();
    VersionedIntervalTimeline<String, DataSegment> segmentTimeline = connector.inReadOnlyTransaction(
        new TransactionCallback<VersionedIntervalTimeline<String, DataSegment>>()
        {
          @Override
          public VersionedIntervalTimeline<String, DataSegment> inTransaction(Handle handle, TransactionStatus status) throws Exception
          {
            return handle
                .createQuery(String.format("SELECT payload FROM %s WHERE dataSource = :dataSource", getSegmentsTable()))
                .setFetchSize(connector.getStreamingFetchSize())
                .bind("dataSource", ds)
                .map(ByteArrayMapper.FIRST)
                .fold(
                    new VersionedIntervalTimeline<String, DataSegment>(Ordering.natural()),
                    new Folder3<VersionedIntervalTimeline<String, DataSegment>, byte[]>()
                    {
                      @Override
                      public VersionedIntervalTimeline<String, DataSegment> fold(
                          VersionedIntervalTimeline<String, DataSegment> timeline,
                          byte[] payload,
                          FoldController foldController,
                          StatementContext statementContext
                      ) throws SQLException
                      {
                        try {
                          final DataSegment segment = DATA_SEGMENT_INTERNER.intern(jsonMapper.readValue(payload, DataSegment.class));
                          timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
                          return timeline;
                        }
                        catch (Exception e) {
                          throw new SQLException(e.toString());
                        }
                      }
                    }
                );
          }
        }
    );
    final List<DataSegment> segments = Lists.newArrayList();
    for (TimelineObjectHolder<String, DataSegment> objectHolder : segmentTimeline.lookup(new Interval("0000-01-01/3000-01-01"))) {
      for (PartitionChunk<DataSegment> partitionChunk : objectHolder.getObject()) {
        segments.add(partitionChunk.getObject());
      }
    }
    if (segments.isEmpty()) {
      log.warn("No segments found in the database!");
      return false;
    }
    dbi.withHandle(
        new HandleCallback<Void>()
        {
          @Override
          public Void withHandle(Handle handle) throws Exception
          {
            Batch batch = handle.createBatch();
            for (DataSegment segment : segments) {
              batch.add(String.format("UPDATE %s SET used=true WHERE id = '%s'", getSegmentsTable(), segment.getIdentifier()));
            }
            batch.execute();
            return null;
          }
        }
    );
  }
  catch (Exception e) {
    log.error(e, "Exception enabling datasource %s", ds);
    return false;
  }
  return true;
}
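The fold above is where StatementContext appears: JDBI hands it to the Folder3 callback for every streamed payload row. A minimal standalone sketch of the same idiom, assuming only a JDBI v2 Handle and a table with a payload column; the PayloadCounter class and its counting logic are illustrative, not part of Druid:

import org.skife.jdbi.v2.FoldController;
import org.skife.jdbi.v2.Folder3;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.util.ByteArrayMapper;

import java.sql.SQLException;

// Hypothetical helper: folds over streamed payload rows and counts them.
public class PayloadCounter
{
  public long countPayloads(Handle handle, String segmentsTable)
  {
    return handle
        .createQuery(String.format("SELECT payload FROM %s", segmentsTable))
        .map(ByteArrayMapper.FIRST)
        .fold(
            0L,
            new Folder3<Long, byte[]>()
            {
              @Override
              public Long fold(Long count, byte[] payload, FoldController control, StatementContext ctx) throws SQLException
              {
                // The StatementContext carries per-statement attributes; like the Druid
                // snippet above, this folder only needs it to satisfy the signature.
                return count + 1;
              }
            }
        );
  }
}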
Use of org.skife.jdbi.v2.StatementContext in project druid by druid-io.
Class SQLMetadataSegmentManager, method poll:
@Override
public void poll()
{
  try {
    if (!started) {
      return;
    }
    ConcurrentHashMap<String, DruidDataSource> newDataSources = new ConcurrentHashMap<String, DruidDataSource>();
    log.debug("Starting polling of segment table");
    // some databases such as PostgreSQL require auto-commit turned off
    // to stream results back; enabling transactions disables auto-commit
    //
    // setting the connection to read-only will allow some databases such as MySQL
    // to automatically use read-only transaction mode, further optimizing the query
    final List<DataSegment> segments = connector.inReadOnlyTransaction(
        new TransactionCallback<List<DataSegment>>()
        {
          @Override
          public List<DataSegment> inTransaction(Handle handle, TransactionStatus status) throws Exception
          {
            return handle
                .createQuery(String.format("SELECT payload FROM %s WHERE used=true", getSegmentsTable()))
                .setFetchSize(connector.getStreamingFetchSize())
                .map(
                    new ResultSetMapper<DataSegment>()
                    {
                      @Override
                      public DataSegment map(int index, ResultSet r, StatementContext ctx) throws SQLException
                      {
                        try {
                          return DATA_SEGMENT_INTERNER.intern(jsonMapper.readValue(r.getBytes("payload"), DataSegment.class));
                        }
                        catch (IOException e) {
                          log.makeAlert(e, "Failed to read segment from db.");
                          return null;
                        }
                      }
                    }
                )
                .list();
          }
        }
    );
    if (segments == null || segments.isEmpty()) {
      log.warn("No segments found in the database!");
      return;
    }
    final Collection<DataSegment> segmentsFinal = Collections2.filter(segments, Predicates.notNull());
    log.info("Polled and found %,d segments in the database", segments.size());
    for (final DataSegment segment : segmentsFinal) {
      String datasourceName = segment.getDataSource();
      DruidDataSource dataSource = newDataSources.get(datasourceName);
      if (dataSource == null) {
        dataSource = new DruidDataSource(datasourceName, ImmutableMap.of("created", new DateTime().toString()));
        Object shouldBeNull = newDataSources.put(datasourceName, dataSource);
        if (shouldBeNull != null) {
          log.warn("Just put key[%s] into dataSources and what was there wasn't null!? It was[%s]", datasourceName, shouldBeNull);
        }
      }
      if (!dataSource.getSegments().contains(segment)) {
        dataSource.addSegment(segment.getIdentifier(), segment);
      }
    }
    synchronized (lock) {
      if (started) {
        dataSources.set(newDataSources);
      }
    }
  }
  catch (Exception e) {
    log.makeAlert(e, "Problem polling DB.").emit();
  }
}
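The anonymous ResultSetMapper above receives the StatementContext on every row. The same shape can be extracted into a named mapper; a minimal sketch assuming a payload column of bytes (PayloadMapper is illustrative, not part of Druid):

import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.tweak.ResultSetMapper;

import java.sql.ResultSet;
import java.sql.SQLException;

// Hypothetical mapper: returns the raw payload bytes for each row.
public class PayloadMapper implements ResultSetMapper<byte[]>
{
  @Override
  public byte[] map(int index, ResultSet r, StatementContext ctx) throws SQLException
  {
    return r.getBytes("payload");
  }
}

It would be attached exactly like the inline mapper, e.g. handle.createQuery(...).map(new PayloadMapper()).list().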
Use of org.skife.jdbi.v2.StatementContext in project irontest by zheng-wang.
Class DBTeststepRunner, method run:
protected BasicTeststepRun run(Teststep teststep) throws Exception {
    BasicTeststepRun basicTeststepRun = new BasicTeststepRun();
    DBAPIResponse response = new DBAPIResponse();
    String request = (String) teststep.getRequest();
    Endpoint endpoint = teststep.getEndpoint();
    DBI jdbi = new DBI(endpoint.getUrl(), endpoint.getUsername(), getDecryptedEndpointPassword());
    // get SQL statements (trimmed and without comments) and the JDBI script object
    List<String> statements = IronTestUtils.getStatements(request);
    sanityCheckTheStatements(statements);
    Handle handle = jdbi.open();
    if (SQLStatementType.isSelectStatement(statements.get(0))) {
        // the request is a select statement
        RetainingColumnOrderResultSetMapper resultSetMapper = new RetainingColumnOrderResultSetMapper();
        // use statements.get(0) instead of the raw request, as Oracle does not support a trailing semicolon in select statements
        Query<Map<String, Object>> query = handle.createQuery(statements.get(0)).map(resultSetMapper);
        // obtain columnNames in case the query returns no rows
        final List<String> columnNames = new ArrayList<String>();
        query.addStatementCustomizer(new BaseStatementCustomizer() {
            @Override
            public void afterExecution(PreparedStatement stmt, StatementContext ctx) throws SQLException {
                ResultSetMetaData metaData = stmt.getMetaData();
                for (int i = 1; i <= metaData.getColumnCount(); i++) {
                    columnNames.add(metaData.getColumnLabel(i).toLowerCase());
                }
            }
        });
        // limit the number of returned rows
        List<Map<String, Object>> rows = query.list(5000);
        response.setColumnNames(columnNames);
        response.setRowsJSON(jacksonObjectMapper.writeValueAsString(rows));
    } else {
        // the request is one or more non-select statements
        Script script = handle.createScript(request);
        int[] returnValues = script.execute();
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < returnValues.length; i++) {
            String statementType = SQLStatementType.getByStatement(statements.get(i)).toString();
            sb.append(returnValues[i]).append(" row(s) ").append(statementType.toLowerCase())
                    .append(statementType.endsWith("E") ? "d" : "ed").append("\n");
            response.setStatementExecutionResults(sb.toString());
        }
    }
    handle.close();
    basicTeststepRun.setResponse(response);
    return basicTeststepRun;
}
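The afterExecution hook above is one of several StatementCustomizer callbacks that receive the StatementContext; beforeExecution is another. A minimal sketch assuming only JDBI v2 classes (LoggingCustomizer and its println output are illustrative):

import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.tweak.BaseStatementCustomizer;

import java.sql.PreparedStatement;
import java.sql.SQLException;

// Hypothetical customizer: prints the SQL that is about to run.
public class LoggingCustomizer extends BaseStatementCustomizer {
    @Override
    public void beforeExecution(PreparedStatement stmt, StatementContext ctx) throws SQLException {
        // getRewrittenSql() returns the SQL after JDBI has rewritten named parameters.
        System.out.println("Executing: " + ctx.getRewrittenSql());
    }
}

It would be attached the same way as the column-name customizer above: query.addStatementCustomizer(new LoggingCustomizer()).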
Use of org.skife.jdbi.v2.StatementContext in project SpinalTap by airbnb.
Class ColumnMapperTest, method testColumnMap:
@Test
public void testColumnMap() throws Exception {
  ResultSet resultSet = mock(ResultSet.class);
  StatementContext context = mock(StatementContext.class);
  when(resultSet.getString("COLUMN_NAME")).thenReturn("col1");
  when(resultSet.getString("COLUMN_KEY")).thenReturn("PRI");
  ColumnInfo column = MysqlSchemaUtil.COLUMN_MAPPER.map(0, resultSet, context);
  assertEquals("col1", column.getName());
  assertTrue(column.isPrimaryKey());
  when(resultSet.getString("COLUMN_NAME")).thenReturn("col2");
  when(resultSet.getString("COLUMN_KEY")).thenReturn("");
  column = MysqlSchemaUtil.COLUMN_MAPPER.map(0, resultSet, context);
  assertEquals("col2", column.getName());
  assertFalse(column.isPrimaryKey());
}
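The test only exercises COLUMN_NAME and COLUMN_KEY, so a mapper consistent with its expectations can be sketched against a reduced value type. This is an assumption about the mapper's behavior, not SpinalTap's actual implementation; SimpleColumn and SimpleColumnMapper are hypothetical:

import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.tweak.ResultSetMapper;

import java.sql.ResultSet;
import java.sql.SQLException;

// Hypothetical stand-in for ColumnInfo, reduced to the two fields the test checks.
class SimpleColumn {
  final String name;
  final boolean primaryKey;

  SimpleColumn(String name, boolean primaryKey) {
    this.name = name;
    this.primaryKey = primaryKey;
  }
}

// Hypothetical mapper matching the test's expectations: a COLUMN_KEY of "PRI" marks a primary key.
class SimpleColumnMapper implements ResultSetMapper<SimpleColumn> {
  @Override
  public SimpleColumn map(int index, ResultSet rs, StatementContext ctx) throws SQLException {
    return new SimpleColumn(rs.getString("COLUMN_NAME"), "PRI".equals(rs.getString("COLUMN_KEY")));
  }
}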
Use of org.skife.jdbi.v2.StatementContext in project Rosetta by HubSpot.
Class RosettaMapperFactory, method mapperFor:
@Override
@SuppressWarnings({ "rawtypes", "unchecked" })
public ResultSetMapper mapperFor(Class rawType, StatementContext ctx) {
  ObjectMapper objectMapper = RosettaObjectMapperOverride.resolve(ctx);

  final Type genericType;
  if (ctx.getSqlObjectMethod() == null) {
    genericType = rawType;
  } else {
    genericType = determineGenericReturnType(rawType, ctx.getSqlObjectMethod().getGenericReturnType());
  }

  String tableName = SqlTableNameExtractor.extractTableName(ctx.getRewrittenSql());
  final RosettaMapper mapper = new RosettaMapper(genericType, objectMapper, tableName);

  return new ResultSetMapper() {
    @Override
    public Object map(int index, ResultSet r, StatementContext ctx) throws SQLException {
      return mapper.mapRow(r);
    }
  };
}
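A factory like this takes effect only once it is registered with JDBI. A minimal wiring sketch, assuming RosettaMapperFactory has a no-argument constructor and using an illustrative in-memory H2 URL; Rosetta may ship its own bootstrap helpers:

import org.skife.jdbi.v2.DBI;

// Minimal sketch: register the factory globally so queries without a dedicated mapper
// are routed through mapperFor(...) above.
public class RosettaSetup {
  public static DBI createDbi() {
    DBI dbi = new DBI("jdbc:h2:mem:example");  // illustrative JDBC URL
    dbi.registerMapper(new RosettaMapperFactory());
    return dbi;
  }
}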