Example 11 with Handle

use of org.skife.jdbi.v2.Handle in project killbill by killbill.

the class TestConsumableInArrear method testWithDayLightSaving.

@Test(groups = "slow")
public void testWithDayLightSaving() throws Exception {
    clock.setTime(new DateTime("2015-09-01T08:01:01.000Z"));
    final DateTimeZone tz = DateTimeZone.forID("America/Juneau");
    final AccountData accountData = new MockAccountBuilder()
            .name(UUID.randomUUID().toString().substring(1, 8))
            .firstNameLength(6)
            .email(UUID.randomUUID().toString().substring(1, 8))
            .phone(UUID.randomUUID().toString().substring(1, 8))
            .migrated(false)
            .isNotifiedForInvoices(false)
            .externalKey(UUID.randomUUID().toString().substring(1, 8))
            .billingCycleDayLocal(1)
            .currency(Currency.USD)
            .paymentMethodId(UUID.randomUUID())
            .timeZone(tz)
            .build();
    final Account account = createAccountWithNonOsgiPaymentMethod(accountData);
    accountChecker.checkAccount(account.getId(), accountData, callContext);
    //
    // CREATE SUBSCRIPTION AND EXPECT EVENTS: NextEvent.CREATE, NextEvent.BLOCK, NextEvent.INVOICE
    //
    final DefaultEntitlement bpSubscription = createBaseEntitlementAndCheckForCompletion(
            account.getId(), "bundleKey", "Shotgun", ProductCategory.BASE, BillingPeriod.ANNUAL,
            NextEvent.CREATE, NextEvent.BLOCK, NextEvent.INVOICE);
    // Check the bundle after the BP has been created, otherwise we get an error from auditApi.
    subscriptionChecker.checkSubscriptionCreated(bpSubscription.getId(), internalCallContext);
    invoiceChecker.checkInvoice(account.getId(), 1, callContext,
            new ExpectedInvoiceItemCheck(new LocalDate(2015, 9, 1), null, InvoiceItemType.FIXED, new BigDecimal("0")));
    assertListenerStatus();
    //
    // ADD ADD_ON ON THE SAME DAY
    //
    final DefaultEntitlement aoSubscription = addAOEntitlementAndCheckForCompletion(
            bpSubscription.getBundleId(), "Bullets", ProductCategory.ADD_ON, BillingPeriod.NO_BILLING_PERIOD,
            NextEvent.CREATE, NextEvent.BLOCK, NextEvent.NULL_INVOICE);
    assertListenerStatus();
    busHandler.pushExpectedEvents(NextEvent.PHASE, NextEvent.NULL_INVOICE, NextEvent.INVOICE, NextEvent.PAYMENT, NextEvent.INVOICE_PAYMENT);
    clock.addDays(30);
    assertListenerStatus();
    invoiceChecker.checkInvoice(account.getId(), 2, callContext,
            new ExpectedInvoiceItemCheck(new LocalDate(2015, 10, 1), new LocalDate(2016, 10, 1), InvoiceItemType.RECURRING, new BigDecimal("2399.95")));
    // 2015-11-1
    busHandler.pushExpectedEvent(NextEvent.NULL_INVOICE);
    clock.addMonths(1);
    assertListenerStatus();
    // 2015-12-1
    busHandler.pushExpectedEvent(NextEvent.NULL_INVOICE);
    clock.addMonths(1);
    assertListenerStatus();
    // We sleep to let the system create lots of notifications if an infinite loop were indeed happening.
    Thread.sleep(3000);
    // And then we check that we only have the expected number of notifications in the history table.
    final Integer countNotifications = dbi.withHandle(new HandleCallback<Integer>() {

        @Override
        public Integer withHandle(final Handle handle) throws Exception {
            List<Map<String, Object>> res = handle.select("select count(*) as count from notifications_history;");
            final Integer count = Integer.valueOf(res.get(0).get("count").toString());
            return count;
        }
    });
    Assert.assertEquals(countNotifications.intValue(), 4);
}
Also used : Account(org.killbill.billing.account.api.Account) MockAccountBuilder(org.killbill.billing.mock.MockAccountBuilder) ExpectedInvoiceItemCheck(org.killbill.billing.beatrix.util.InvoiceChecker.ExpectedInvoiceItemCheck) LocalDate(org.joda.time.LocalDate) DateTime(org.joda.time.DateTime) DateTimeZone(org.joda.time.DateTimeZone) BigDecimal(java.math.BigDecimal) UsageApiException(org.killbill.billing.usage.api.UsageApiException) Handle(org.skife.jdbi.v2.Handle) AccountData(org.killbill.billing.account.api.AccountData) DefaultEntitlement(org.killbill.billing.entitlement.api.DefaultEntitlement) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) Test(org.testng.annotations.Test)
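
The test above uses JDBI's withHandle pattern: the callback borrows a Handle and JDBI closes it (and the underlying JDBC connection) when the callback returns. A minimal, self-contained sketch of the same count query, assuming an in-memory H2 database is on the classpath; the JDBC URL, class name, and table contents are illustrative:

import java.util.List;
import java.util.Map;

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.tweak.HandleCallback;

public class CountWithHandleSketch {

    public static void main(final String[] args) {
        // Hypothetical in-memory database; any JDBC URL works here.
        final DBI dbi = new DBI("jdbc:h2:mem:demo");
        final Integer count = dbi.withHandle(new HandleCallback<Integer>() {
            @Override
            public Integer withHandle(final Handle handle) throws Exception {
                // Setup only so the sketch runs standalone.
                handle.execute("create table notifications_history (id int)");
                handle.execute("insert into notifications_history (id) values (1)");
                // select() returns one Map per row, keyed by column label.
                final List<Map<String, Object>> res = handle.select("select count(*) as count from notifications_history");
                return Integer.valueOf(res.get(0).get("count").toString());
            }
        });
        System.out.println("count = " + count);
    }
}

Because select() maps each row by column label, aliasing count(*) as count is what makes res.get(0).get("count") work in the test above.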

Example 12 with Handle

use of org.skife.jdbi.v2.Handle in project druid by druid-io.

the class IndexerSQLMetadataStorageCoordinator method announceHistoricalSegments.

/**
   * {@inheritDoc}
   */
@Override
public SegmentPublishResult announceHistoricalSegments(final Set<DataSegment> segments, final DataSourceMetadata startMetadata, final DataSourceMetadata endMetadata) throws IOException {
    if (segments.isEmpty()) {
        throw new IllegalArgumentException("segment set must not be empty");
    }
    final String dataSource = segments.iterator().next().getDataSource();
    for (DataSegment segment : segments) {
        if (!dataSource.equals(segment.getDataSource())) {
            throw new IllegalArgumentException("segments must all be from the same dataSource");
        }
    }
    if ((startMetadata == null && endMetadata != null) || (startMetadata != null && endMetadata == null)) {
        throw new IllegalArgumentException("start/end metadata pair must be either null or non-null");
    }
    // Find which segments are used (i.e. not overshadowed).
    final Set<DataSegment> usedSegments = Sets.newHashSet();
    for (TimelineObjectHolder<String, DataSegment> holder : VersionedIntervalTimeline.forSegments(segments).lookup(JodaUtils.ETERNITY)) {
        for (PartitionChunk<DataSegment> chunk : holder.getObject()) {
            usedSegments.add(chunk.getObject());
        }
    }
    final AtomicBoolean txnFailure = new AtomicBoolean(false);
    try {
        return connector.retryTransaction(new TransactionCallback<SegmentPublishResult>() {

            @Override
            public SegmentPublishResult inTransaction(final Handle handle, final TransactionStatus transactionStatus) throws Exception {
                final Set<DataSegment> inserted = Sets.newHashSet();
                if (startMetadata != null) {
                    final DataSourceMetadataUpdateResult result = updateDataSourceMetadataWithHandle(handle, dataSource, startMetadata, endMetadata);
                    if (result != DataSourceMetadataUpdateResult.SUCCESS) {
                        transactionStatus.setRollbackOnly();
                        txnFailure.set(true);
                        if (result == DataSourceMetadataUpdateResult.FAILURE) {
                            throw new RuntimeException("Aborting transaction!");
                        } else if (result == DataSourceMetadataUpdateResult.TRY_AGAIN) {
                            throw new RetryTransactionException("Aborting transaction!");
                        }
                    }
                }
                for (final DataSegment segment : segments) {
                    if (announceHistoricalSegment(handle, segment, usedSegments.contains(segment))) {
                        inserted.add(segment);
                    }
                }
                return new SegmentPublishResult(ImmutableSet.copyOf(inserted), true);
            }
        }, 3, SQLMetadataConnector.DEFAULT_MAX_TRIES);
    } catch (CallbackFailedException e) {
        if (txnFailure.get()) {
            return new SegmentPublishResult(ImmutableSet.<DataSegment>of(), false);
        } else {
            throw e;
        }
    }
}
Also used : ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) TransactionStatus(org.skife.jdbi.v2.TransactionStatus) DataSegment(io.druid.timeline.DataSegment) SQLException(java.sql.SQLException) IOException(java.io.IOException) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException) Handle(org.skife.jdbi.v2.Handle) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) SegmentPublishResult(io.druid.indexing.overlord.SegmentPublishResult)
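
Druid's connector.retryTransaction is a Druid-side helper that re-runs the callback on transient failures; the underlying contract is plain JDBI's DBI.inTransaction, where a thrown exception or TransactionStatus.setRollbackOnly() rolls the whole unit back and surfaces as a CallbackFailedException. A minimal sketch of that contract, assuming an in-memory H2 database; the URL, class name, and table are illustrative:

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.TransactionCallback;
import org.skife.jdbi.v2.TransactionStatus;

public class TransactionSketch {

    public static void main(final String[] args) {
        final DBI dbi = new DBI("jdbc:h2:mem:txdemo");
        // Setup outside the transaction: DDL auto-commits on many databases anyway.
        final Handle setup = dbi.open();
        setup.execute("create table segments (id varchar(32) primary key)");
        setup.close();
        // Both inserts commit together, or not at all.
        final Integer inserted = dbi.inTransaction(new TransactionCallback<Integer>() {
            @Override
            public Integer inTransaction(final Handle handle, final TransactionStatus status) throws Exception {
                int rows = 0;
                rows += handle.createStatement("insert into segments (id) values (:id)").bind("id", "seg-1").execute();
                rows += handle.createStatement("insert into segments (id) values (:id)").bind("id", "seg-2").execute();
                // Throwing here (or calling status.setRollbackOnly()) would undo both inserts.
                return rows;
            }
        });
        System.out.println("inserted = " + inserted);
    }
}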

Example 13 with Handle

use of org.skife.jdbi.v2.Handle in project druid by druid-io.

the class SQLMetadataSegmentManager method removeSegment.

@Override
public boolean removeSegment(String ds, final String segmentID) {
    try {
        connector.getDBI().withHandle(new HandleCallback<Void>() {

            @Override
            public Void withHandle(Handle handle) throws Exception {
                handle.createStatement(String.format("UPDATE %s SET used=false WHERE id = :segmentID", getSegmentsTable())).bind("segmentID", segmentID).execute();
                return null;
            }
        });
        ConcurrentHashMap<String, DruidDataSource> dataSourceMap = dataSources.get();
        if (!dataSourceMap.containsKey(ds)) {
            log.warn("Cannot find datasource %s", ds);
            return false;
        }
        DruidDataSource dataSource = dataSourceMap.get(ds);
        dataSource.removePartition(segmentID);
        if (dataSource.isEmpty()) {
            dataSourceMap.remove(ds);
        }
    } catch (Exception e) {
        log.error(e, e.toString());
        return false;
    }
    return true;
}
Also used : DruidDataSource(io.druid.client.DruidDataSource) SQLException(java.sql.SQLException) IOException(java.io.IOException) Handle(org.skife.jdbi.v2.Handle)
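
Note how the update binds :segmentID as a JDBC parameter while the table name is spliced in with String.format: JDBC can bind values but not identifiers. A minimal sketch of the same soft-delete update, assuming an in-memory H2 database; the URL, class name, and rows are illustrative:

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.tweak.HandleCallback;

public class SoftDeleteSketch {

    public static void main(final String[] args) {
        final DBI dbi = new DBI("jdbc:h2:mem:softdelete");
        final Integer updated = dbi.withHandle(new HandleCallback<Integer>() {
            @Override
            public Integer withHandle(final Handle handle) throws Exception {
                handle.execute("create table segments (id varchar(32) primary key, used boolean)");
                handle.execute("insert into segments (id, used) values ('seg-1', true)");
                // The bound value never touches the SQL string, so no quoting or injection issues.
                return handle.createStatement("update segments set used = false where id = :segmentID")
                             .bind("segmentID", "seg-1")
                             .execute();
            }
        });
        System.out.println("rows updated = " + updated);
    }
}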

Example 14 with Handle

use of org.skife.jdbi.v2.Handle in project druid by druid-io.

the class SQLMetadataSegmentManager method enableDatasource.

@Override
public boolean enableDatasource(final String ds) {
    try {
        final IDBI dbi = connector.getDBI();
        VersionedIntervalTimeline<String, DataSegment> segmentTimeline = connector.inReadOnlyTransaction(
                new TransactionCallback<VersionedIntervalTimeline<String, DataSegment>>() {

            @Override
            public VersionedIntervalTimeline<String, DataSegment> inTransaction(Handle handle, TransactionStatus status) throws Exception {
                return handle.createQuery(String.format("SELECT payload FROM %s WHERE dataSource = :dataSource", getSegmentsTable())).setFetchSize(connector.getStreamingFetchSize()).bind("dataSource", ds).map(ByteArrayMapper.FIRST).fold(new VersionedIntervalTimeline<String, DataSegment>(Ordering.natural()), new Folder3<VersionedIntervalTimeline<String, DataSegment>, byte[]>() {

                    @Override
                    public VersionedIntervalTimeline<String, DataSegment> fold(
                            VersionedIntervalTimeline<String, DataSegment> timeline, byte[] payload,
                            FoldController foldController, StatementContext statementContext) throws SQLException {
                        try {
                            final DataSegment segment = DATA_SEGMENT_INTERNER.intern(jsonMapper.readValue(payload, DataSegment.class));
                            timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
                            return timeline;
                        } catch (Exception e) {
                            throw new SQLException(e.toString());
                        }
                    }
                });
            }
        });
        final List<DataSegment> segments = Lists.newArrayList();
        for (TimelineObjectHolder<String, DataSegment> objectHolder : segmentTimeline.lookup(new Interval("0000-01-01/3000-01-01"))) {
            for (PartitionChunk<DataSegment> partitionChunk : objectHolder.getObject()) {
                segments.add(partitionChunk.getObject());
            }
        }
        if (segments.isEmpty()) {
            log.warn("No segments found in the database!");
            return false;
        }
        dbi.withHandle(new HandleCallback<Void>() {

            @Override
            public Void withHandle(Handle handle) throws Exception {
                Batch batch = handle.createBatch();
                for (DataSegment segment : segments) {
                    batch.add(String.format("UPDATE %s SET used=true WHERE id = '%s'", getSegmentsTable(), segment.getIdentifier()));
                }
                batch.execute();
                return null;
            }
        });
    } catch (Exception e) {
        log.error(e, "Exception enabling datasource %s", ds);
        return false;
    }
    return true;
}
Also used : IDBI(org.skife.jdbi.v2.IDBI) SQLException(java.sql.SQLException) TransactionStatus(org.skife.jdbi.v2.TransactionStatus) DataSegment(io.druid.timeline.DataSegment) SQLException(java.sql.SQLException) IOException(java.io.IOException) Handle(org.skife.jdbi.v2.Handle) StatementContext(org.skife.jdbi.v2.StatementContext) FoldController(org.skife.jdbi.v2.FoldController) Batch(org.skife.jdbi.v2.Batch) VersionedIntervalTimeline(io.druid.timeline.VersionedIntervalTimeline) Folder3(org.skife.jdbi.v2.Folder3) Interval(org.joda.time.Interval)
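
Handle.createBatch collects plain SQL strings and executes them in a single round trip to the database. A minimal sketch of the flag-flipping batch above, assuming an in-memory H2 database; the URL, class name, and rows are illustrative:

import org.skife.jdbi.v2.Batch;
import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.tweak.HandleCallback;

public class BatchSketch {

    public static void main(final String[] args) {
        final DBI dbi = new DBI("jdbc:h2:mem:batchdemo");
        dbi.withHandle(new HandleCallback<Void>() {
            @Override
            public Void withHandle(final Handle handle) throws Exception {
                handle.execute("create table segments (id varchar(32) primary key, used boolean)");
                handle.execute("insert into segments (id, used) values ('seg-1', false)");
                handle.execute("insert into segments (id, used) values ('seg-2', false)");
                // One round trip for all statements instead of one per statement.
                final Batch batch = handle.createBatch();
                batch.add("update segments set used = true where id = 'seg-1'");
                batch.add("update segments set used = true where id = 'seg-2'");
                batch.execute();
                return null;
            }
        });
    }
}

JDBI also offers handle.prepareBatch(sql) for running one statement with many bound parameter sets; the Druid code inlines each segment id into the SQL instead.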

Example 15 with Handle

use of org.skife.jdbi.v2.Handle in project druid by druid-io.

the class SQLMetadataSegmentManager method poll.

@Override
public void poll() {
    try {
        if (!started) {
            return;
        }
        ConcurrentHashMap<String, DruidDataSource> newDataSources = new ConcurrentHashMap<String, DruidDataSource>();
        log.debug("Starting polling of segment table");
        // Some databases, such as PostgreSQL, require auto-commit to be turned off
        // to stream results back; enabling transactions disables auto-commit.
        //
        // Setting the connection to read-only allows some databases, such as MySQL,
        // to automatically use read-only transaction mode, further optimizing the query.
        final List<DataSegment> segments = connector.inReadOnlyTransaction(new TransactionCallback<List<DataSegment>>() {

            @Override
            public List<DataSegment> inTransaction(Handle handle, TransactionStatus status) throws Exception {
                return handle.createQuery(String.format("SELECT payload FROM %s WHERE used=true", getSegmentsTable())).setFetchSize(connector.getStreamingFetchSize()).map(new ResultSetMapper<DataSegment>() {

                    @Override
                    public DataSegment map(int index, ResultSet r, StatementContext ctx) throws SQLException {
                        try {
                            return DATA_SEGMENT_INTERNER.intern(jsonMapper.readValue(r.getBytes("payload"), DataSegment.class));
                        } catch (IOException e) {
                            log.makeAlert(e, "Failed to read segment from db.");
                            return null;
                        }
                    }
                }).list();
            }
        });
        if (segments == null || segments.isEmpty()) {
            log.warn("No segments found in the database!");
            return;
        }
        final Collection<DataSegment> segmentsFinal = Collections2.filter(segments, Predicates.notNull());
        log.info("Polled and found %,d segments in the database", segments.size());
        for (final DataSegment segment : segmentsFinal) {
            String datasourceName = segment.getDataSource();
            DruidDataSource dataSource = newDataSources.get(datasourceName);
            if (dataSource == null) {
                dataSource = new DruidDataSource(datasourceName, ImmutableMap.of("created", new DateTime().toString()));
                Object shouldBeNull = newDataSources.put(datasourceName, dataSource);
                if (shouldBeNull != null) {
                    log.warn("Just put key[%s] into dataSources and what was there wasn't null!?  It was[%s]", datasourceName, shouldBeNull);
                }
            }
            if (!dataSource.getSegments().contains(segment)) {
                dataSource.addSegment(segment.getIdentifier(), segment);
            }
        }
        synchronized (lock) {
            if (started) {
                dataSources.set(newDataSources);
            }
        }
    } catch (Exception e) {
        log.makeAlert(e, "Problem polling DB.").emit();
    }
}
Also used : SQLException(java.sql.SQLException) TransactionStatus(org.skife.jdbi.v2.TransactionStatus) IOException(java.io.IOException) DruidDataSource(io.druid.client.DruidDataSource) DataSegment(io.druid.timeline.DataSegment) SQLException(java.sql.SQLException) IOException(java.io.IOException) DateTime(org.joda.time.DateTime) Handle(org.skife.jdbi.v2.Handle) StatementContext(org.skife.jdbi.v2.StatementContext) ResultSet(java.sql.ResultSet) List(java.util.List) ArrayList(java.util.ArrayList) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap)
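
The streaming setup explained in the comments (read-only transaction, streaming fetch size, a per-row ResultSetMapper) reduces to plain JDBI. A minimal sketch using withHandle in place of Druid's connector.inReadOnlyTransaction wrapper, assuming an in-memory H2 database; the URL, class name, and payloads are illustrative:

import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.List;

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.tweak.HandleCallback;
import org.skife.jdbi.v2.tweak.ResultSetMapper;

public class PollSketch {

    public static void main(final String[] args) {
        final DBI dbi = new DBI("jdbc:h2:mem:polldemo");
        final List<String> payloads = dbi.withHandle(new HandleCallback<List<String>>() {
            @Override
            public List<String> withHandle(final Handle handle) throws Exception {
                handle.execute("create table segments (payload varchar(255), used boolean)");
                handle.execute("insert into segments (payload, used) values ('{\"id\":\"seg-1\"}', true)");
                return handle.createQuery("select payload from segments where used = true")
                             // Hint to stream rows rather than buffer the whole result set;
                             // PostgreSQL additionally needs auto-commit off (a transaction) for this to take effect.
                             .setFetchSize(100)
                             .map(new ResultSetMapper<String>() {
                                 @Override
                                 public String map(final int index, final ResultSet r, final StatementContext ctx) throws SQLException {
                                     return r.getString("payload");
                                 }
                             })
                             .list();
            }
        });
        System.out.println(payloads);
    }
}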

Aggregations

Handle (org.skife.jdbi.v2.Handle): 103
DBI (org.skife.jdbi.v2.DBI): 28
Before (org.junit.Before): 21
IOException (java.io.IOException): 18
List (java.util.List): 17
DataSourceFactory (io.dropwizard.db.DataSourceFactory): 15
DBIFactory (io.dropwizard.jdbi.DBIFactory): 15
SQLException (java.sql.SQLException): 15
Map (java.util.Map): 14
Test (org.junit.Test): 14
Test (org.testng.annotations.Test): 14
DateTime (org.joda.time.DateTime): 13
ArrayList (java.util.ArrayList): 11
TransactionStatus (org.skife.jdbi.v2.TransactionStatus): 11
ResultSet (java.sql.ResultSet): 10
ImmutableList (com.google.common.collect.ImmutableList): 8
UUID (java.util.UUID): 8
CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException): 7
ImmutableSet (com.google.common.collect.ImmutableSet): 6
Set (java.util.Set): 6