use of org.skife.jdbi.v2.Update in project killbill by killbill.
the class TestNonEntityDao method executeAndReturnGeneratedKeys.
private Long executeAndReturnGeneratedKeys(final Handle handle, final String sql, final Object... args) {
    final Update stmt = handle.createStatement(sql);
    int position = 0;
    for (final Object arg : args) {
        stmt.bind(position++, arg);
    }
    return stmt.executeAndReturnGeneratedKeys(new LongMapper(), "record_id").first();
}
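For reference, a minimal sketch of how a helper like this might be invoked from the same test class (the table and column names here are hypothetical, not taken from the Kill Bill schema):

// Hypothetical usage: insert a row and read back the generated record_id
final Long recordId = dbi.withHandle(new HandleCallback<Long>() {
    @Override
    public Long withHandle(final Handle handle) throws Exception {
        return executeAndReturnGeneratedKeys(handle,
                                             "insert into some_table (name, created_date) values (?, ?)",
                                             "example", new Date());
    }
});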
use of org.skife.jdbi.v2.Update in project killbill by killbill.
the class TestInternalCallContextFactory method testCreateInternalCallContextWithAccountRecordIdFromAccountObjectType.
@Test(groups = "slow")
public void testCreateInternalCallContextWithAccountRecordIdFromAccountObjectType() throws Exception {
    final UUID accountId = UUID.randomUUID();
    final Long accountRecordId = dbi.withHandle(new HandleCallback<Long>() {

        @Override
        public Long withHandle(final Handle handle) throws Exception {
            // Note: we always create an accounts table, see MysqlTestingHelper
            return update(handle,
                          "insert into accounts (id, external_key, email, name, first_name_length, reference_time, time_zone, created_date, created_by, updated_date, updated_by, tenant_record_id) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                          accountId.toString(), accountId.toString(), "yo@t.com", "toto", 4, new Date(), "UTC", new Date(), "i", new Date(), "j", internalCallContext.getTenantRecordId());
        }

        Long update(final Handle handle, final String sql, final Object... args) {
            final Update stmt = handle.createStatement(sql);
            int position = 0;
            for (final Object arg : args) {
                stmt.bind(position++, arg);
            }
            return stmt.executeAndReturnGeneratedKeys(new LongMapper(), "record_id").first();
        }
    });
    final ImmutableAccountData immutableAccountData = Mockito.mock(ImmutableAccountData.class);
    Mockito.when(immutableAccountInternalApi.getImmutableAccountDataByRecordId(Mockito.<Long>eq(accountRecordId), Mockito.<InternalTenantContext>any()))
           .thenReturn(immutableAccountData);
    final InternalCallContext context = internalCallContextFactory.createInternalCallContext(accountId, ObjectType.ACCOUNT, callContext);
    // The account record id should have been looked up in the accounts table
    Assert.assertEquals(context.getAccountRecordId(), accountRecordId);
    verifyInternalCallContext(context);
}
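The positional bind(position++, arg) loop in the update helper above is one way to parameterize an org.skife.jdbi.v2.Update; named bindings are an equivalent alternative. A sketch, not taken from the Kill Bill sources, with the column list trimmed for brevity (so it is illustrative rather than a drop-in replacement for the full insert):

// Sketch: the same kind of insert expressed with named parameters instead of positional ones
final Update stmt = handle.createStatement(
        "insert into accounts (id, external_key, email) values (:id, :externalKey, :email)");
stmt.bind("id", accountId.toString());
stmt.bind("externalKey", accountId.toString());
stmt.bind("email", "yo@t.com");
final Long recordId = stmt.executeAndReturnGeneratedKeys(new LongMapper(), "record_id").first();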
use of org.skife.jdbi.v2.Update in project druid by druid-io.
the class SqlSegmentsMetadataQuery method markSegments.
/**
 * Marks the provided segments as either used or unused.
 *
 * Returns the number of segments actually modified.
 */
public int markSegments(final Collection<SegmentId> segmentIds, final boolean used) {
    final String dataSource;
    if (segmentIds.isEmpty()) {
        return 0;
    } else {
        dataSource = segmentIds.iterator().next().getDataSource();
        if (segmentIds.stream().anyMatch(segment -> !dataSource.equals(segment.getDataSource()))) {
            throw new IAE("Segments to drop must all be part of the same datasource");
        }
    }
    final PreparedBatch batch = handle.prepareBatch(
            StringUtils.format("UPDATE %s SET used = ? WHERE datasource = ? AND id = ?", dbTables.getSegmentsTable()));
    for (SegmentId segmentId : segmentIds) {
        batch.add(used, dataSource, segmentId.toString());
    }
    final int[] segmentChanges = batch.execute();
    return computeNumChangedSegments(
            segmentIds.stream().map(SegmentId::toString).collect(Collectors.toList()),
            segmentChanges);
}
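The PreparedBatch pattern above (one SQL template, repeated add(...) calls, a single execute()) issues all the updates as one batch; execute() returns one update count per added row, which is what computeNumChangedSegments inspects to report how many segments actually changed. A minimal standalone sketch of the same JDBI v2 pattern, using an illustrative table name rather than Druid's configured segments table:

// Sketch: batch-update a hypothetical "segments" table and sum the per-row update counts
final PreparedBatch batch = handle.prepareBatch("UPDATE segments SET used = ? WHERE id = ?");
for (final String id : segmentIdStrings) {
    batch.add(false, id);
}
final int[] updateCounts = batch.execute();
int changed = 0;
for (final int count : updateCounts) {
    changed += count;
}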
use of org.skife.jdbi.v2.Update in project druid by druid-io.
the class IndexerSQLMetadataStorageCoordinator method createNewSegment.
/**
 * This function creates a new segment for the given datasource/interval/etc. A critical
 * aspect of the creation is to make sure that the new version and new partition number will make
 * sense given the existing segments and pending segments. It is also very important to avoid
 * clashes with existing pending and used/unused segments.
 *
 * @param handle           Database handle
 * @param dataSource       datasource for the new segment
 * @param interval         interval for the new segment
 * @param partialShardSpec Shard spec info minus segment id stuff
 * @param existingVersion  Version of segments in interval, used to compute the version of the very first segment in
 *                         interval
 * @return the new segment identifier, or null if a new segment could not be allocated
 * @throws IOException
 */
@Nullable
private SegmentIdWithShardSpec createNewSegment(
        final Handle handle,
        final String dataSource,
        final Interval interval,
        final PartialShardSpec partialShardSpec,
        final String existingVersion
) throws IOException {
    // Get the time chunk and associated data segments for the given interval, if any
    final List<TimelineObjectHolder<String, DataSegment>> existingChunks =
            getTimelineForIntervalsWithHandle(handle, dataSource, ImmutableList.of(interval)).lookup(interval);
    if (existingChunks.size() > 1) {
        // Not possible to expand more than one chunk with a single segment.
        log.warn(
                "Cannot allocate new segment for dataSource[%s], interval[%s]: already have [%,d] chunks.",
                dataSource,
                interval,
                existingChunks.size()
        );
        return null;
    } else {
        // max partitionId of the shardSpecs which share the same partition space.
        SegmentIdWithShardSpec maxId = null;
        if (!existingChunks.isEmpty()) {
            TimelineObjectHolder<String, DataSegment> existingHolder = Iterables.getOnlyElement(existingChunks);
            // noinspection ConstantConditions
            for (DataSegment segment : FluentIterable
                    .from(existingHolder.getObject())
                    .transform(PartitionChunk::getObject)
                    .filter(segment -> segment.getShardSpec().sharePartitionSpace(partialShardSpec))) {
                // Note that this will compute the max id of existing, visible, data segments in the time chunk:
                if (maxId == null || maxId.getShardSpec().getPartitionNum() < segment.getShardSpec().getPartitionNum()) {
                    maxId = SegmentIdWithShardSpec.fromDataSegment(segment);
                }
            }
        }
        // Get the version of the existing chunk; we might need it in some of the cases below
        // to compute the new identifier's version
        @Nullable
        final String versionOfExistingChunk;
        if (!existingChunks.isEmpty()) {
            // Remember: only one chunk is possible for the given interval, so get the first and only one
            versionOfExistingChunk = existingChunks.get(0).getVersion();
        } else {
            versionOfExistingChunk = null;
        }
        // Next, we need to enrich the maxId computed before with the information of the pending segments.
        // It is possible that a pending segment has a higher id, in which case we need it: it will work,
        // and it will avoid clashes when inserting the new pending segment later in the caller of this method.
        final Set<SegmentIdWithShardSpec> pendings = getPendingSegmentsForIntervalWithHandle(handle, dataSource, interval);
        // Make sure we add the maxId we obtained from the segments table:
        if (maxId != null) {
            pendings.add(maxId);
        }
        // Now compute the maxId with all the information: pendings + segments.
        // The versionOfExistingChunk filter ensures that we pick the max id with the version of the existing chunk,
        // in case there is a pending segment with a higher version but no corresponding used segments,
        // which could generate a clash with an existing segment once the new id is generated.
        maxId = pendings.stream()
                        .filter(id -> id.getShardSpec().sharePartitionSpace(partialShardSpec))
                        .filter(id -> versionOfExistingChunk == null || id.getVersion().equals(versionOfExistingChunk))
                        .max((id1, id2) -> {
                            final int versionCompare = id1.getVersion().compareTo(id2.getVersion());
                            if (versionCompare != 0) {
                                return versionCompare;
                            } else {
                                return Integer.compare(id1.getShardSpec().getPartitionNum(), id2.getShardSpec().getPartitionNum());
                            }
                        })
                        .orElse(null);
        // The following code attempts to compute the new version. If this new version is not null
        // at the end of the next block, then it will be used as the new version for the initial
        // or appended segment.
        final String newSegmentVersion;
        if (versionOfExistingChunk != null) {
            // The existing segment version overrides, so pick that now that we know it exists
            newSegmentVersion = versionOfExistingChunk;
        } else if (!pendings.isEmpty() && maxId != null) {
            // There are no visible segments in the time chunk, so pick the maxId of pendings, as computed above
            newSegmentVersion = maxId.getVersion();
        } else {
            // No segments, no pendings, so this must be the very first segment created for this interval
            newSegmentVersion = null;
        }
        if (maxId == null) {
            // When appending segments, a null maxId means that we are allocating the very initial
            // segment for this time chunk.
            // This code is executed when the Overlord coordinates segment allocation, which happens either when
            // appending segments or when using segment lock. Since the core partitions set is not determined for
            // appended segments, we set it to 0. When segment lock is used, the core partitions set doesn't apply,
            // so we also set it to 0 and let the OvershadowableManager handle the atomic segment update.
            final int newPartitionId = partialShardSpec.useNonRootGenerationPartitionSpace()
                                       ? PartitionIds.NON_ROOT_GEN_START_PARTITION_ID
                                       : PartitionIds.ROOT_GEN_START_PARTITION_ID;
            String version = newSegmentVersion == null ? existingVersion : newSegmentVersion;
            return new SegmentIdWithShardSpec(dataSource, interval, version, partialShardSpec.complete(jsonMapper, newPartitionId, 0));
        } else if (!maxId.getInterval().equals(interval) || maxId.getVersion().compareTo(existingVersion) > 0) {
            log.warn(
                    "Cannot allocate new segment for dataSource[%s], interval[%s], existingVersion[%s]: conflicting segment[%s].",
                    dataSource,
                    interval,
                    existingVersion,
                    maxId
            );
            return null;
        } else if (maxId.getShardSpec().getNumCorePartitions() == SingleDimensionShardSpec.UNKNOWN_NUM_CORE_PARTITIONS) {
            log.warn("Cannot allocate new segment because of unknown core partition size of segment[%s], shardSpec[%s]", maxId, maxId.getShardSpec());
            return null;
        } else {
            return new SegmentIdWithShardSpec(
                    dataSource,
                    maxId.getInterval(),
                    Preconditions.checkNotNull(newSegmentVersion, "newSegmentVersion"),
                    partialShardSpec.complete(jsonMapper, maxId.getShardSpec().getPartitionNum() + 1, maxId.getShardSpec().getNumCorePartitions())
            );
        }
    }
}
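A concrete walk-through of the allocation rules above, with made-up values: suppose the interval already has one chunk at version V containing partitions 0 and 1, plus a pending segment at partition 2 for the same version V, all sharing the partition space of partialShardSpec. Then versionOfExistingChunk is V, maxId ends up at partition 2 (the pending segment wins on partition number), newSegmentVersion is V, and, assuming no interval, version, or core-partition conflict is detected, the method returns a new identifier at partition 3 with version V. If instead the interval were completely empty, maxId and newSegmentVersion would both be null and the method would return the very first identifier for the chunk, using existingVersion and the root-generation (or non-root-generation) start partition id.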
use of org.skife.jdbi.v2.Update in project killbill by killbill.
the class DatabaseExportDao method exportDataForAccountAndTable.
private void exportDataForAccountAndTable(final DatabaseExportOutputStream out, final List<ColumnInfo> columnsForTable, final InternalTenantContext context) {
    TableType tableType = TableType.OTHER;
    final String tableName = columnsForTable.get(0).getTableName();
    // Ignore casing (for H2)
    if (TableName.ACCOUNT.getTableName().equalsIgnoreCase(tableName)) {
        tableType = TableType.KB_ACCOUNT;
    } else if (TableName.ACCOUNT_HISTORY.getTableName().equalsIgnoreCase(tableName)) {
        tableType = TableType.KB_ACCOUNT_HISTORY;
    }
    boolean firstColumn = true;
    final StringBuilder queryBuilder = new StringBuilder("select ");
    for (final ColumnInfo column : columnsForTable) {
        if (!firstColumn) {
            queryBuilder.append(", ");
        } else {
            firstColumn = false;
        }
        queryBuilder.append(column.getColumnName());
        if (tableType == TableType.OTHER) {
            // Ignore casing (for H2)
            if (column.getColumnName().equalsIgnoreCase(TableType.KB_PER_ACCOUNT.getAccountRecordIdColumnName())) {
                tableType = TableType.KB_PER_ACCOUNT;
            } else if (column.getColumnName().equalsIgnoreCase(TableType.NOTIFICATION.getAccountRecordIdColumnName())) {
                tableType = TableType.NOTIFICATION;
            }
        }
    }
    // Don't export non-account specific tables
    if (tableType == TableType.OTHER) {
        return;
    }
    // Build the query - make sure to filter by account and tenant!
    queryBuilder.append(" from ").append(tableName)
                .append(" where ").append(tableType.getAccountRecordIdColumnName()).append(" = :accountRecordId and ")
                .append(tableType.getTenantRecordIdColumnName()).append(" = :tenantRecordId");
    // Notify the stream that we're about to write data for a different table
    out.newTable(tableName, columnsForTable);
    dbi.withHandle(new HandleCallback<Void>() {
        @Override
        public Void withHandle(final Handle handle) throws Exception {
            final ResultIterator<Map<String, Object>> iterator = handle.createQuery(queryBuilder.toString())
                                                                       .bind("accountRecordId", context.getAccountRecordId())
                                                                       .bind("tenantRecordId", context.getTenantRecordId())
                                                                       .iterator();
            try {
                while (iterator.hasNext()) {
                    final Map<String, Object> row = iterator.next();
                    for (final String k : row.keySet()) {
                        final Object value = row.get(k);
                        // See also LowerToCamelBeanMapper
                        if (value instanceof Blob) {
                            final Blob blob = (Blob) value;
                            // Blob positions are 1-based per the JDBC spec, like Clob below
                            row.put(k, blob.getBytes(1, (int) blob.length()));
                        } else if (value instanceof Clob) {
                            // TODO Update LowerToCamelBeanMapper?
                            final Clob clob = (Clob) value;
                            row.put(k, clob.getSubString(1, (int) clob.length()));
                        }
                    }
                    try {
                        out.write(row);
                    } catch (final IOException e) {
                        logger.warn("Unable to write row: {}", row, e);
                        throw e;
                    }
                }
            } finally {
                iterator.close();
            }
            return null;
        }
    });
}
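The createQuery(...).bind(...).iterator() chain above streams rows one at a time instead of materializing the full result set, and the try/finally around the iterator is what releases the underlying statement and result set. A minimal standalone sketch of the same JDBI v2 pattern (the table name, binding, and process(...) callback are illustrative only):

// Sketch: stream rows with a JDBI v2 ResultIterator and close it deterministically
final ResultIterator<Map<String, Object>> rows = handle.createQuery("select * from some_table where tenant_record_id = :tenantRecordId")
                                                       .bind("tenantRecordId", tenantRecordId)
                                                       .iterator();
try {
    while (rows.hasNext()) {
        process(rows.next());  // hypothetical per-row callback
    }
} finally {
    rows.close();
}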