Example 56 with Update

Use of org.skife.jdbi.v2.Update in project killbill by killbill.

The class TestNonEntityDao, method executeAndReturnGeneratedKeys.

private Long executeAndReturnGeneratedKeys(final Handle handle, final String sql, final Object... args) {
    final Update stmt = handle.createStatement(sql);
    int position = 0;
    for (final Object arg : args) {
        stmt.bind(position++, arg);
    }
    return stmt.executeAndReturnGeneratedKeys(new LongMapper(), "record_id").first();
}
Also used : LongMapper(org.skife.jdbi.v2.util.LongMapper) Update(org.skife.jdbi.v2.Update)
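
For readers who want to try this pattern outside the Kill Bill test suite, here is a minimal, self-contained sketch of the same executeAndReturnGeneratedKeys() usage. The JDBC URL, table, and columns are illustrative assumptions (an in-memory H2 database), not Kill Bill's actual schema.

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.Update;
import org.skife.jdbi.v2.tweak.HandleCallback;
import org.skife.jdbi.v2.util.LongMapper;

public class GeneratedKeyExample {
    public static void main(final String[] args) {
        // Hypothetical in-memory H2 database; any JDBC URL supported by JDBI v2 works here.
        final DBI dbi = new DBI("jdbc:h2:mem:test;DB_CLOSE_DELAY=-1");
        final Long recordId = dbi.withHandle(new HandleCallback<Long>() {
            @Override
            public Long withHandle(final Handle handle) throws Exception {
                handle.execute("create table accounts (record_id bigint auto_increment primary key, external_key varchar(255))");
                final Update stmt = handle.createStatement("insert into accounts (external_key) values (?)");
                stmt.bind(0, "my-key");
                // Same pattern as the helper above: ask the driver for the generated record_id column.
                return stmt.executeAndReturnGeneratedKeys(new LongMapper(), "record_id").first();
            }
        });
        System.out.println("Generated record_id = " + recordId);
    }
}

The helper in the test is just this pattern wrapped behind varargs positional binding.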

Example 57 with Update

Use of org.skife.jdbi.v2.Update in project killbill by killbill.

The class TestInternalCallContextFactory, method testCreateInternalCallContextWithAccountRecordIdFromAccountObjectType.

@Test(groups = "slow")
public void testCreateInternalCallContextWithAccountRecordIdFromAccountObjectType() throws Exception {
    final UUID accountId = UUID.randomUUID();
    final Long accountRecordId = dbi.withHandle(new HandleCallback<Long>() {

        @Override
        public Long withHandle(final Handle handle) throws Exception {
            // Note: we always create an accounts table, see MysqlTestingHelper
            return update(handle, "insert into accounts (id, external_key, email, name, first_name_length, reference_time, time_zone, created_date, created_by, updated_date, updated_by, tenant_record_id) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", accountId.toString(), accountId.toString(), "yo@t.com", "toto", 4, new Date(), "UTC", new Date(), "i", new Date(), "j", internalCallContext.getTenantRecordId());
        }

        Long update(final Handle handle, final String sql, final Object... args) {
            final Update stmt = handle.createStatement(sql);
            int position = 0;
            for (final Object arg : args) {
                stmt.bind(position++, arg);
            }
            return stmt.executeAndReturnGeneratedKeys(new LongMapper(), "record_id").first();
        }
    });
    final ImmutableAccountData immutableAccountData = Mockito.mock(ImmutableAccountData.class);
    Mockito.when(immutableAccountInternalApi.getImmutableAccountDataByRecordId(Mockito.<Long>eq(accountRecordId), Mockito.<InternalTenantContext>any())).thenReturn(immutableAccountData);
    final InternalCallContext context = internalCallContextFactory.createInternalCallContext(accountId, ObjectType.ACCOUNT, callContext);
    // The account record id should have been looked up in the accounts table
    Assert.assertEquals(context.getAccountRecordId(), accountRecordId);
    verifyInternalCallContext(context);
}
Also used : ImmutableAccountData(org.killbill.billing.account.api.ImmutableAccountData) InternalCallContext(org.killbill.billing.callcontext.InternalCallContext) Update(org.skife.jdbi.v2.Update) Date(java.util.Date) Handle(org.skife.jdbi.v2.Handle) LongMapper(org.skife.jdbi.v2.util.LongMapper) UUID(java.util.UUID) Test(org.testng.annotations.Test)
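
One detail worth noting in the stubbing above: once one Mockito argument matcher is used in a call, every argument must be a matcher, which is why both eq(accountRecordId) and any() appear. A minimal, hypothetical illustration of that rule follows; the LookupService interface is invented for the sketch and is not part of Kill Bill.

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class MatcherExample {
    // Hypothetical collaborator, standing in for immutableAccountInternalApi in the test above.
    interface LookupService {
        String lookup(Long recordId, String tenant);
    }

    public static void main(final String[] args) {
        final LookupService service = mock(LookupService.class);
        // Mixing a raw value with a matcher would fail with InvalidUseOfMatchersException,
        // so both arguments are wrapped, mirroring eq(...) / any() in the test.
        when(service.lookup(eq(42L), any())).thenReturn("found");
        System.out.println(service.lookup(42L, "any tenant"));  // prints "found"
    }
}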

Example 58 with Update

Use of org.skife.jdbi.v2.Update in project druid by druid-io.

The class SqlSegmentsMetadataQuery, method markSegments.

/**
 * Marks the provided segments as either used or unused.
 *
 * Returns the number of segments actually modified.
 */
public int markSegments(final Collection<SegmentId> segmentIds, final boolean used) {
    final String dataSource;
    if (segmentIds.isEmpty()) {
        return 0;
    } else {
        dataSource = segmentIds.iterator().next().getDataSource();
        if (segmentIds.stream().anyMatch(segment -> !dataSource.equals(segment.getDataSource()))) {
            throw new IAE("Segments to drop must all be part of the same datasource");
        }
    }
    final PreparedBatch batch = handle.prepareBatch(StringUtils.format("UPDATE %s SET used = ? WHERE datasource = ? AND id = ?", dbTables.getSegmentsTable()));
    for (SegmentId segmentId : segmentIds) {
        batch.add(used, dataSource, segmentId.toString());
    }
    final int[] segmentChanges = batch.execute();
    return computeNumChangedSegments(segmentIds.stream().map(SegmentId::toString).collect(Collectors.toList()), segmentChanges);
}
Also used : SegmentId(org.apache.druid.timeline.SegmentId) PreparedBatch(org.skife.jdbi.v2.PreparedBatch) IAE(org.apache.druid.java.util.common.IAE)
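
The PreparedBatch pattern above generalizes to any bulk UPDATE: one parameterized statement, many bound rows, and a single execute() that returns a per-row update count. Below is a minimal sketch against a hypothetical in-memory H2 table; the table name, columns, and JDBC URL are illustrative assumptions, not Druid's actual metadata schema.

import java.util.Arrays;

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.PreparedBatch;
import org.skife.jdbi.v2.tweak.HandleCallback;

public class BatchUpdateExample {
    public static void main(final String[] args) {
        // Hypothetical in-memory H2 database standing in for the metadata store.
        final DBI dbi = new DBI("jdbc:h2:mem:segments;DB_CLOSE_DELAY=-1");
        final int[] rowCounts = dbi.withHandle(new HandleCallback<int[]>() {
            @Override
            public int[] withHandle(final Handle handle) throws Exception {
                handle.execute("create table segments (id varchar(255) primary key, datasource varchar(255), used boolean)");
                handle.execute("insert into segments (id, datasource, used) values ('seg1', 'wiki', true)");
                handle.execute("insert into segments (id, datasource, used) values ('seg2', 'wiki', true)");
                final PreparedBatch batch = handle.prepareBatch("UPDATE segments SET used = ? WHERE datasource = ? AND id = ?");
                for (final String id : Arrays.asList("seg1", "seg2", "missing")) {
                    // One add() per row, mirroring the loop over segmentIds above.
                    batch.add(false, "wiki", id);
                }
                // One entry per batched statement: [1, 1, 0] here.
                return batch.execute();
            }
        });
        System.out.println(Arrays.toString(rowCounts));
    }
}

The int[] returned by execute() is the same per-statement count array that markSegments() hands to computeNumChangedSegments() to report how many segments were actually modified.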

Example 59 with Update

Use of org.skife.jdbi.v2.Update in project druid by druid-io.

The class IndexerSQLMetadataStorageCoordinator, method createNewSegment.

/**
 * This function creates a new segment for the given datasource/interval/etc. A critical
 * aspect of the creation is to make sure that the new version and new partition number make
 * sense given the existing segments and pending segments; it is also very important to avoid
 * clashes with existing pending and used/unused segments.
 * @param handle Database handle
 * @param dataSource datasource for the new segment
 * @param interval interval for the new segment
 * @param partialShardSpec Shard spec info minus segment id stuff
 * @param existingVersion Version of segments in interval, used to compute the version of the very first segment in
 *                        interval
 * @return the identifier of the new segment, or null if a new segment could not be allocated
 * @throws IOException if segment metadata cannot be read from the database
 */
@Nullable
private SegmentIdWithShardSpec createNewSegment(final Handle handle, final String dataSource, final Interval interval, final PartialShardSpec partialShardSpec, final String existingVersion) throws IOException {
    // Get the time chunk and associated data segments for the given interval, if any
    final List<TimelineObjectHolder<String, DataSegment>> existingChunks = getTimelineForIntervalsWithHandle(handle, dataSource, ImmutableList.of(interval)).lookup(interval);
    if (existingChunks.size() > 1) {
        // Not possible to expand more than one chunk with a single segment.
        log.warn("Cannot allocate new segment for dataSource[%s], interval[%s]: already have [%,d] chunks.", dataSource, interval, existingChunks.size());
        return null;
    } else {
        // max partitionId of the shardSpecs which share the same partition space.
        SegmentIdWithShardSpec maxId = null;
        if (!existingChunks.isEmpty()) {
            TimelineObjectHolder<String, DataSegment> existingHolder = Iterables.getOnlyElement(existingChunks);
            // noinspection ConstantConditions
            for (DataSegment segment : FluentIterable.from(existingHolder.getObject()).transform(PartitionChunk::getObject).filter(segment -> segment.getShardSpec().sharePartitionSpace(partialShardSpec))) {
                // Note that this will compute the max id of existing, visible, data segments in the time chunk:
                if (maxId == null || maxId.getShardSpec().getPartitionNum() < segment.getShardSpec().getPartitionNum()) {
                    maxId = SegmentIdWithShardSpec.fromDataSegment(segment);
                }
            }
        }
        // Get the version of the existing chunk, we might need it in some of the cases below
        // to compute the new identifier's version
        @Nullable final String versionOfExistingChunk;
        if (!existingChunks.isEmpty()) {
            // remember: only one chunk is possible for a given interval, so get the first and only one
            versionOfExistingChunk = existingChunks.get(0).getVersion();
        } else {
            versionOfExistingChunk = null;
        }
        // Next, we need to enrich the maxId computed above with information from the pending segments:
        // it is possible that a pending segment has a higher id, in which case we need to use it
        // to avoid clashes when inserting the new pending segment later in the caller of this method.
        final Set<SegmentIdWithShardSpec> pendings = getPendingSegmentsForIntervalWithHandle(handle, dataSource, interval);
        // Make sure we add the maxId we obtained from the segments table:
        if (maxId != null) {
            pendings.add(maxId);
        }
        // Now compute the maxId with all the information: pendings + segments.
        // The versionOfExistingChunk filter ensures that we pick the max id with the version of the existing chunk,
        // because a pending segment with a higher version but no corresponding used segments
        // could generate a clash with an existing segment once the new id is generated.
        maxId = pendings.stream().filter(id -> id.getShardSpec().sharePartitionSpace(partialShardSpec)).filter(id -> versionOfExistingChunk == null ? true : id.getVersion().equals(versionOfExistingChunk)).max((id1, id2) -> {
            final int versionCompare = id1.getVersion().compareTo(id2.getVersion());
            if (versionCompare != 0) {
                return versionCompare;
            } else {
                return Integer.compare(id1.getShardSpec().getPartitionNum(), id2.getShardSpec().getPartitionNum());
            }
        }).orElse(null);
        // The following code attempts to compute the new version. If this
        // new version is not null at the end of the next block, it will be
        // used as the new version for an initial or appended segment.
        final String newSegmentVersion;
        if (versionOfExistingChunk != null) {
            // segment version overrides, so pick that now that we know it exists
            newSegmentVersion = versionOfExistingChunk;
        } else if (!pendings.isEmpty() && maxId != null) {
            // there are no visible segments in the time chunk, so pick the maxId of the pendings, as computed above
            newSegmentVersion = maxId.getVersion();
        } else {
            // no segments, no pendings, so this must be the very first segment created for this interval
            newSegmentVersion = null;
        }
        if (maxId == null) {
            // When appending segments, null maxId means that we are allocating the very initial
            // segment for this time chunk.
            // This code is executed when the Overlord coordinates segment allocation, i.e. when appending segments
            // or when using segment lock. Since the core partitions set is not determined for appended segments, we set
            // it to 0. With segment lock, the core partitions set does not apply either, so we also set it to 0 so that the
            // OvershadowableManager handles the atomic segment update.
            final int newPartitionId = partialShardSpec.useNonRootGenerationPartitionSpace() ? PartitionIds.NON_ROOT_GEN_START_PARTITION_ID : PartitionIds.ROOT_GEN_START_PARTITION_ID;
            String version = newSegmentVersion == null ? existingVersion : newSegmentVersion;
            return new SegmentIdWithShardSpec(dataSource, interval, version, partialShardSpec.complete(jsonMapper, newPartitionId, 0));
        } else if (!maxId.getInterval().equals(interval) || maxId.getVersion().compareTo(existingVersion) > 0) {
            log.warn("Cannot allocate new segment for dataSource[%s], interval[%s], existingVersion[%s]: conflicting segment[%s].", dataSource, interval, existingVersion, maxId);
            return null;
        } else if (maxId.getShardSpec().getNumCorePartitions() == SingleDimensionShardSpec.UNKNOWN_NUM_CORE_PARTITIONS) {
            log.warn("Cannot allocate new segment because of unknown core partition size of segment[%s], shardSpec[%s]", maxId, maxId.getShardSpec());
            return null;
        } else {
            return new SegmentIdWithShardSpec(dataSource, maxId.getInterval(), Preconditions.checkNotNull(newSegmentVersion, "newSegmentVersion"), partialShardSpec.complete(jsonMapper, maxId.getShardSpec().getPartitionNum() + 1, maxId.getShardSpec().getNumCorePartitions()));
        }
    }
}
Also used : Arrays(java.util.Arrays) Partitions(org.apache.druid.timeline.Partitions) Inject(com.google.inject.Inject) TransactionStatus(org.skife.jdbi.v2.TransactionStatus) LifecycleStart(org.apache.druid.java.util.common.lifecycle.LifecycleStart) StatementContext(org.skife.jdbi.v2.StatementContext) Pair(org.apache.druid.java.util.common.Pair) FluentIterable(com.google.common.collect.FluentIterable) DataSourceMetadata(org.apache.druid.indexing.overlord.DataSourceMetadata) ResultSet(java.sql.ResultSet) Map(java.util.Map) PreparedBatch(org.skife.jdbi.v2.PreparedBatch) IAE(org.apache.druid.java.util.common.IAE) CloseableIterator(org.apache.druid.java.util.common.parsers.CloseableIterator) ByteArrayMapper(org.skife.jdbi.v2.util.ByteArrayMapper) DateTimes(org.apache.druid.java.util.common.DateTimes) ImmutableSet(com.google.common.collect.ImmutableSet) JacksonUtils(org.apache.druid.java.util.common.jackson.JacksonUtils) SegmentPublishResult(org.apache.druid.indexing.overlord.SegmentPublishResult) Collection(java.util.Collection) Segments(org.apache.druid.indexing.overlord.Segments) StringUtils(org.apache.druid.java.util.common.StringUtils) Set(java.util.Set) ISE(org.apache.druid.java.util.common.ISE) NotNull(javax.validation.constraints.NotNull) Collectors(java.util.stream.Collectors) List(java.util.List) PartitionIds(org.apache.druid.timeline.partition.PartitionIds) IndexerMetadataStorageCoordinator(org.apache.druid.indexing.overlord.IndexerMetadataStorageCoordinator) DataSegment(org.apache.druid.timeline.DataSegment) ISOChronology(org.joda.time.chrono.ISOChronology) PartialShardSpec(org.apache.druid.timeline.partition.PartialShardSpec) Logger(org.apache.druid.java.util.common.logger.Logger) IntStream(java.util.stream.IntStream) Iterables(com.google.common.collect.Iterables) Intervals(org.apache.druid.java.util.common.Intervals) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Hashing(com.google.common.hash.Hashing) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) PartitionChunk(org.apache.druid.timeline.partition.PartitionChunk) Interval(org.joda.time.Interval) Lists(com.google.common.collect.Lists) ImmutableList(com.google.common.collect.ImmutableList) ResultIterator(org.skife.jdbi.v2.ResultIterator) Nullable(javax.annotation.Nullable) VersionedIntervalTimeline(org.apache.druid.timeline.VersionedIntervalTimeline) BaseEncoding(com.google.common.io.BaseEncoding) HandleCallback(org.skife.jdbi.v2.tweak.HandleCallback) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) DateTime(org.joda.time.DateTime) SegmentIdWithShardSpec(org.apache.druid.segment.realtime.appenderator.SegmentIdWithShardSpec) TimelineObjectHolder(org.apache.druid.timeline.TimelineObjectHolder) JsonProcessingException(com.fasterxml.jackson.core.JsonProcessingException) Query(org.skife.jdbi.v2.Query) IOException(java.io.IOException) NoneShardSpec(org.apache.druid.timeline.partition.NoneShardSpec) Handle(org.skife.jdbi.v2.Handle) SingleDimensionShardSpec(org.apache.druid.timeline.partition.SingleDimensionShardSpec) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException) SegmentUtils(org.apache.druid.segment.SegmentUtils) TransactionCallback(org.skife.jdbi.v2.TransactionCallback) Preconditions(com.google.common.base.Preconditions) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Collections(java.util.Collections) StringEscapeUtils(org.apache.commons.lang.StringEscapeUtils) TimelineObjectHolder(org.apache.druid.timeline.TimelineObjectHolder) 
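
The heart of the id selection above is the ordering passed to max(): compare versions first, then partition numbers. Below is a standalone sketch of that comparator logic using a stripped-down id class; SimpleId is invented for the example, whereas the real code compares SegmentIdWithShardSpec instances through their version and ShardSpec partition number.

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;

public class MaxIdExample {
    // Minimal stand-in for SegmentIdWithShardSpec: just a version string and a partition number.
    static final class SimpleId {
        final String version;
        final int partitionNum;

        SimpleId(final String version, final int partitionNum) {
            this.version = version;
            this.partitionNum = partitionNum;
        }

        @Override
        public String toString() {
            return version + "_" + partitionNum;
        }
    }

    public static void main(final String[] args) {
        final List<SimpleId> pendings = Arrays.asList(
                new SimpleId("2021-01-01T00:00:00.000Z", 3),
                new SimpleId("2021-01-01T00:00:00.000Z", 7),
                new SimpleId("2020-12-31T00:00:00.000Z", 9));
        // Same ordering as in createNewSegment(): versions are compared first, partition numbers break ties.
        final Optional<SimpleId> maxId = pendings.stream().max(
                Comparator.comparing((SimpleId id) -> id.version)
                          .thenComparingInt(id -> id.partitionNum));
        // Prints 2021-01-01T00:00:00.000Z_7: the newer version wins even though another id has a higher partition number.
        System.out.println(maxId.orElse(null));
    }
}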

Example 60 with Update

Use of org.skife.jdbi.v2.Update in project killbill by killbill.

The class DatabaseExportDao, method exportDataForAccountAndTable.

private void exportDataForAccountAndTable(final DatabaseExportOutputStream out, final List<ColumnInfo> columnsForTable, final InternalTenantContext context) {
    TableType tableType = TableType.OTHER;
    final String tableName = columnsForTable.get(0).getTableName();
    // Ignore casing (for H2)
    if (TableName.ACCOUNT.getTableName().equalsIgnoreCase(tableName)) {
        tableType = TableType.KB_ACCOUNT;
    } else if (TableName.ACCOUNT_HISTORY.getTableName().equalsIgnoreCase(tableName)) {
        tableType = TableType.KB_ACCOUNT_HISTORY;
    }
    boolean firstColumn = true;
    final StringBuilder queryBuilder = new StringBuilder("select ");
    for (final ColumnInfo column : columnsForTable) {
        if (!firstColumn) {
            queryBuilder.append(", ");
        } else {
            firstColumn = false;
        }
        queryBuilder.append(column.getColumnName());
        if (tableType == TableType.OTHER) {
            // Ignore casing (for H2)
            if (column.getColumnName().equalsIgnoreCase(TableType.KB_PER_ACCOUNT.getAccountRecordIdColumnName())) {
                tableType = TableType.KB_PER_ACCOUNT;
            } else if (column.getColumnName().equalsIgnoreCase(TableType.NOTIFICATION.getAccountRecordIdColumnName())) {
                tableType = TableType.NOTIFICATION;
            }
        }
    }
    // Don't export non-account specific tables
    if (tableType == TableType.OTHER) {
        return;
    }
    // Build the query - make sure to filter by account and tenant!
    queryBuilder.append(" from ").append(tableName).append(" where ").append(tableType.getAccountRecordIdColumnName()).append(" = :accountRecordId and ").append(tableType.getTenantRecordIdColumnName()).append("  = :tenantRecordId");
    // Notify the stream that we're about to write data for a different table
    out.newTable(tableName, columnsForTable);
    dbi.withHandle(new HandleCallback<Void>() {

        @Override
        public Void withHandle(final Handle handle) throws Exception {
            final ResultIterator<Map<String, Object>> iterator = handle.createQuery(queryBuilder.toString()).bind("accountRecordId", context.getAccountRecordId()).bind("tenantRecordId", context.getTenantRecordId()).iterator();
            try {
                while (iterator.hasNext()) {
                    final Map<String, Object> row = iterator.next();
                    for (final String k : row.keySet()) {
                        final Object value = row.get(k);
                        // See also LowerToCamelBeanMapper
                        if (value instanceof Blob) {
                            final Blob blob = (Blob) value;
                            // JDBC Blob offsets are 1-based, so read from position 1
                            row.put(k, blob.getBytes(1, (int) blob.length()));
                        } else if (value instanceof Clob) {
                            // TODO Update LowerToCamelBeanMapper?
                            final Clob clob = (Clob) value;
                            row.put(k, clob.getSubString(1, (int) clob.length()));
                        }
                    }
                    try {
                        out.write(row);
                    } catch (final IOException e) {
                        logger.warn("Unable to write row: {}", row, e);
                        throw e;
                    }
                }
            } finally {
                iterator.close();
            }
            return null;
        }
    });
}
Also used : Blob(java.sql.Blob) ResultIterator(org.skife.jdbi.v2.ResultIterator) ColumnInfo(org.killbill.billing.util.api.ColumnInfo) DefaultColumnInfo(org.killbill.billing.util.validation.DefaultColumnInfo) IOException(java.io.IOException) IOException(java.io.IOException) Handle(org.skife.jdbi.v2.Handle) Clob(java.sql.Clob) Map(java.util.Map)
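
The export loop above leans on two JDBI v2 features: createQuery(...).bind(...).iterator() streams rows lazily as Map<String, Object>, and the iterator must be closed in a finally block because it keeps the underlying ResultSet open. Here is a minimal sketch of that streaming pattern against a hypothetical in-memory H2 table (no Blob/Clob handling; the table is not part of Kill Bill's schema).

import java.util.Map;

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.ResultIterator;
import org.skife.jdbi.v2.tweak.HandleCallback;

public class StreamingQueryExample {
    public static void main(final String[] args) {
        // Hypothetical in-memory H2 database standing in for an account-scoped table.
        final DBI dbi = new DBI("jdbc:h2:mem:export;DB_CLOSE_DELAY=-1");
        dbi.withHandle(new HandleCallback<Void>() {
            @Override
            public Void withHandle(final Handle handle) throws Exception {
                handle.execute("create table payments (record_id bigint primary key, account_record_id bigint, amount decimal(10, 2))");
                handle.execute("insert into payments values (1, 42, 10.00)");
                handle.execute("insert into payments values (2, 42, 20.00)");
                handle.execute("insert into payments values (3, 99, 30.00)");
                // Rows are fetched lazily, one Map per row, so large tables are never fully materialized in memory.
                final ResultIterator<Map<String, Object>> iterator = handle.createQuery(
                        "select * from payments where account_record_id = :accountRecordId")
                        .bind("accountRecordId", 42L)
                        .iterator();
                try {
                    while (iterator.hasNext()) {
                        System.out.println(iterator.next());
                    }
                } finally {
                    // The iterator holds the underlying ResultSet and Statement until closed.
                    iterator.close();
                }
                return null;
            }
        });
    }
}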

Aggregations

Update (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.bgp.message.rev171207.Update): 46 usages
Test (org.junit.Test): 41 usages
AbstractRIBSupportTest (org.opendaylight.protocol.bgp.rib.spi.AbstractRIBSupportTest): 24 usages
ByteBuf (io.netty.buffer.ByteBuf): 15 usages
Attributes1 (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.bgp.multiprotocol.rev171207.Attributes1): 14 usages
UpdateBuilder (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.bgp.message.rev171207.UpdateBuilder): 13 usages
Attributes2 (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.bgp.multiprotocol.rev171207.Attributes2): 12 usages
PeerSpecificParserConstraint (org.opendaylight.protocol.bgp.parser.spi.PeerSpecificParserConstraint): 10 usages
Attributes (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.bgp.message.rev171207.path.attributes.Attributes): 9 usages
Handle (org.skife.jdbi.v2.Handle): 9 usages
ArrayList (java.util.ArrayList): 8 usages
AttributesBuilder (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.bgp.message.rev171207.path.attributes.AttributesBuilder): 8 usages
Ipv4Address (org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.Ipv4Address): 7 usages
AsPathBuilder (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.bgp.message.rev171207.path.attributes.attributes.AsPathBuilder): 7 usages
OriginBuilder (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.bgp.message.rev171207.path.attributes.attributes.OriginBuilder): 7 usages
Nlri (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.bgp.message.rev171207.update.message.Nlri): 7 usages
AsNumber (org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.AsNumber): 6 usages
Ipv4Prefix (org.opendaylight.yang.gen.v1.urn.ietf.params.xml.ns.yang.ietf.inet.types.rev130715.Ipv4Prefix): 6 usages
Ipv4NextHopCase (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.bgp.types.rev130919.next.hop.c.next.hop.Ipv4NextHopCase): 6 usages
Ipv4NextHopCaseBuilder (org.opendaylight.yang.gen.v1.urn.opendaylight.params.xml.ns.yang.bgp.types.rev130919.next.hop.c.next.hop.Ipv4NextHopCaseBuilder): 6 usages