Search in sources :

Example 46 with Handle

use of org.skife.jdbi.v2.Handle in project druid by druid-io.

From the class IndexerSQLMetadataStorageCoordinatorTest, the method testDropSegmentsWithHandleForSegmentThatExist:

@Test
public void testDropSegmentsWithHandleForSegmentThatExist() {
    try (Handle handle = derbyConnector.getDBI().open()) {
        Assert.assertTrue(insertUsedSegments(ImmutableSet.of(defaultSegment)));
        List<String> usedSegments = retrieveUsedSegmentIds();
        Assert.assertEquals(1, usedSegments.size());
        Assert.assertEquals(defaultSegment.getId().toString(), usedSegments.get(0));
        // Try to drop the segment and verify the metadata update succeeds.
        IndexerSQLMetadataStorageCoordinator.DataStoreMetadataUpdateResult result =
            coordinator.dropSegmentsWithHandle(handle, ImmutableSet.of(defaultSegment), defaultSegment.getDataSource());
        Assert.assertEquals(IndexerSQLMetadataStorageCoordinator.DataStoreMetadataUpdateResult.SUCCESS, result);
        usedSegments = retrieveUsedSegmentIds();
        Assert.assertEquals(0, usedSegments.size());
    }
}
Also used : Handle(org.skife.jdbi.v2.Handle) Test(org.junit.Test)
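
The detail worth copying from this test is the try-with-resources around the JDBI v2 Handle, which releases the pooled connection even when an assertion throws. Below is a minimal standalone sketch of the same pattern, assuming Derby's embedded driver is on the classpath; the JDBC URL and query are illustrative placeholders, not part of the Druid test.

import java.util.List;
import java.util.Map;
import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;

public class HandleOpenSketch {
    public static void main(String[] args) {
        // Hypothetical in-memory Derby URL; the Druid test obtains its DBI
        // from a DerbyConnector test rule instead.
        DBI dbi = new DBI("jdbc:derby:memory:demo;create=true");
        // Handle implements Closeable in JDBI v2, so the connection is
        // released even if the query or any assertion inside the block throws.
        try (Handle handle = dbi.open()) {
            List<Map<String, Object>> rows = handle.select("SELECT 1 FROM SYSIBM.SYSDUMMY1");
            System.out.println(rows);
        }
    }
}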

Example 47 with Handle

use of org.skife.jdbi.v2.Handle in project druid by druid-io.

From the class DerivativeDataSourceManager, the method updateDerivatives:

private void updateDerivatives() {
    List<Pair<String, DerivativeDataSourceMetadata>> derivativesInDatabase = connector.retryWithHandle(
        handle -> handle
            .createQuery(StringUtils.format("SELECT DISTINCT dataSource,commit_metadata_payload FROM %1$s", dbTables.get().getDataSourceTable()))
            .map((int index, ResultSet r, StatementContext ctx) -> {
                String datasourceName = r.getString("dataSource");
                DataSourceMetadata payload = JacksonUtils.readValue(objectMapper, r.getBytes("commit_metadata_payload"), DataSourceMetadata.class);
                if (!(payload instanceof DerivativeDataSourceMetadata)) {
                    return null;
                }
                DerivativeDataSourceMetadata metadata = (DerivativeDataSourceMetadata) payload;
                return new Pair<>(datasourceName, metadata);
            })
            .list()
    );
    List<DerivativeDataSource> derivativeDataSources = derivativesInDatabase
        .parallelStream()
        .filter(data -> data != null)
        .map(derivatives -> {
            String name = derivatives.lhs;
            DerivativeDataSourceMetadata metadata = derivatives.rhs;
            String baseDataSource = metadata.getBaseDataSource();
            long avgSizePerGranularity = getAvgSizePerGranularity(name);
            log.info(
                "find derivatives: {bases=%s, derivative=%s, dimensions=%s, metrics=%s, avgSize=%s}",
                baseDataSource, name, metadata.getDimensions(), metadata.getMetrics(), avgSizePerGranularity
            );
            return new DerivativeDataSource(name, baseDataSource, metadata.getColumns(), avgSizePerGranularity);
        })
        .filter(derivatives -> derivatives.getAvgSizeBasedGranularity() > 0)
        .collect(Collectors.toList());
    ConcurrentHashMap<String, SortedSet<DerivativeDataSource>> newDerivatives = new ConcurrentHashMap<>();
    for (DerivativeDataSource derivative : derivativeDataSources) {
        newDerivatives.computeIfAbsent(derivative.getBaseDataSource(), ds -> new TreeSet<>()).add(derivative);
    }
    ConcurrentHashMap<String, SortedSet<DerivativeDataSource>> current;
    do {
        current = DERIVATIVES_REF.get();
    } while (!DERIVATIVES_REF.compareAndSet(current, newDerivatives));
}
Also used : MoreExecutors(com.google.common.util.concurrent.MoreExecutors) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) SortedSet(java.util.SortedSet) Intervals(org.apache.druid.java.util.common.Intervals) Inject(com.google.inject.Inject) Supplier(com.google.common.base.Supplier) Duration(org.joda.time.Duration) LifecycleStart(org.apache.druid.java.util.common.lifecycle.LifecycleStart) AtomicReference(java.util.concurrent.atomic.AtomicReference) StatementContext(org.skife.jdbi.v2.StatementContext) TreeSet(java.util.TreeSet) Pair(org.apache.druid.java.util.common.Pair) HashSet(java.util.HashSet) Interval(org.joda.time.Interval) DataSourceMetadata(org.apache.druid.indexing.overlord.DataSourceMetadata) LifecycleStop(org.apache.druid.java.util.common.lifecycle.LifecycleStop) ResultSet(java.sql.ResultSet) ManageLifecycle(org.apache.druid.guice.ManageLifecycle) ListeningScheduledExecutorService(com.google.common.util.concurrent.ListeningScheduledExecutorService) DateTimes(org.apache.druid.java.util.common.DateTimes) SQLMetadataConnector(org.apache.druid.metadata.SQLMetadataConnector) ImmutableSet(com.google.common.collect.ImmutableSet) Execs(org.apache.druid.java.util.common.concurrent.Execs) JacksonUtils(org.apache.druid.java.util.common.jackson.JacksonUtils) EmittingLogger(org.apache.druid.java.util.emitter.EmittingLogger) MetadataStorageTablesConfig(org.apache.druid.metadata.MetadataStorageTablesConfig) ImmutableMap(com.google.common.collect.ImmutableMap) HandleCallback(org.skife.jdbi.v2.tweak.HandleCallback) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) StringUtils(org.apache.druid.java.util.common.StringUtils) Set(java.util.Set) Collectors(java.util.stream.Collectors) TimeUnit(java.util.concurrent.TimeUnit) List(java.util.List) Handle(org.skife.jdbi.v2.Handle) DerivativeDataSourceMetadata(org.apache.druid.indexing.materializedview.DerivativeDataSourceMetadata) DataSegment(org.apache.druid.timeline.DataSegment)
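
The do/while loop at the end of updateDerivatives is a lock-free publish of the freshly built map through an AtomicReference: readers always observe either the previous snapshot or the complete new one. Here is a minimal sketch of the idiom, with invented names (SNAPSHOT_REF, publish) standing in for Druid's DERIVATIVES_REF.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;

public class AtomicPublishSketch {

    private static final AtomicReference<Map<String, String>> SNAPSHOT_REF =
            new AtomicReference<>(new ConcurrentHashMap<>());

    static void publish(Map<String, String> freshSnapshot) {
        Map<String, String> current;
        do {
            current = SNAPSHOT_REF.get();
        } while (!SNAPSHOT_REF.compareAndSet(current, freshSnapshot));
        // Since freshSnapshot replaces the old value unconditionally, a plain
        // SNAPSHOT_REF.set(freshSnapshot) would behave the same here; the CAS
        // loop form only pays off when the update depends on the current value.
    }

    public static void main(String[] args) {
        Map<String, String> next = new ConcurrentHashMap<>();
        next.put("base", "derivative");
        publish(next);
        System.out.println(SNAPSHOT_REF.get());
    }
}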

Example 48 with Handle

use of org.skife.jdbi.v2.Handle in project druid by druid-io.

From the class DerivativeDataSourceManager, the method getAvgSizePerGranularity:

/**
 * Calculates the average data size per segment granularity for a given datasource.
 *
 * e.g. suppose a datasource has the following 5 segments:
 * interval = "2018-04-01/2018-04-02", segment size = 1024 * 1024 * 2
 * interval = "2018-04-01/2018-04-02", segment size = 1024 * 1024 * 2
 * interval = "2018-04-02/2018-04-03", segment size = 1024 * 1024 * 1
 * interval = "2018-04-02/2018-04-03", segment size = 1024 * 1024 * 1
 * interval = "2018-04-02/2018-04-03", segment size = 1024 * 1024 * 1
 * Then the number of distinct intervals is 2 and the total segment size is 1024 * 1024 * 7,
 * so the method returns 1024 * 1024 * 7 / 2 = 1024 * 1024 * 3.5.
 *
 * @param datasource name of the datasource
 * @return average data size per segment granularity
 */
private long getAvgSizePerGranularity(String datasource) {
    return connector.retryWithHandle(new HandleCallback<Long>() {

        Set<Interval> intervals = new HashSet<>();

        long totalSize = 0;

        @Override
        public Long withHandle(Handle handle) {
            handle.createQuery(StringUtils.format(
                    "SELECT start,%1$send%1$s,payload FROM %2$s WHERE used = true AND dataSource = :dataSource",
                    connector.getQuoteString(),
                    dbTables.get().getSegmentsTable()))
                .bind("dataSource", datasource)
                .map((int index, ResultSet r, StatementContext ctx) -> {
                    intervals.add(Intervals.utc(DateTimes.of(r.getString("start")).getMillis(), DateTimes.of(r.getString("end")).getMillis()));
                    DataSegment segment = JacksonUtils.readValue(objectMapper, r.getBytes("payload"), DataSegment.class);
                    totalSize += segment.getSize();
                    return null;
                })
                .list();
            return intervals.isEmpty() ? 0L : totalSize / intervals.size();
        }
    });
}
Also used : ResultSet(java.sql.ResultSet) DataSegment(org.apache.druid.timeline.DataSegment) Interval(org.joda.time.Interval) HashSet(java.util.HashSet) Handle(org.skife.jdbi.v2.Handle) StatementContext(org.skife.jdbi.v2.StatementContext)
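
To make the arithmetic in the Javadoc concrete, here is a self-contained sketch of the same distinct-interval averaging over the five segments from the comment; plain strings and longs stand in for Druid's Interval and DataSegment types.

import java.util.HashSet;
import java.util.Set;

public class AvgSizeSketch {
    public static void main(String[] args) {
        final long mb = 1024L * 1024L;
        // The five segments from the Javadoc example: two distinct intervals, 7 MB total.
        String[] intervals = {
            "2018-04-01/2018-04-02", "2018-04-01/2018-04-02",
            "2018-04-02/2018-04-03", "2018-04-02/2018-04-03", "2018-04-02/2018-04-03"
        };
        long[] sizes = {2 * mb, 2 * mb, 1 * mb, 1 * mb, 1 * mb};

        Set<String> distinctIntervals = new HashSet<>();
        long totalSize = 0;
        for (int i = 0; i < intervals.length; i++) {
            distinctIntervals.add(intervals[i]);
            totalSize += sizes[i];
        }
        // 7340032 bytes over 2 intervals = 3670016 bytes, i.e. 1024 * 1024 * 3.5.
        long avg = distinctIntervals.isEmpty() ? 0L : totalSize / distinctIntervals.size();
        System.out.println(avg);
    }
}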

Example 49 with Handle

use of org.skife.jdbi.v2.Handle in project druid by druid-io.

From the class JdbcCacheGenerator, the method generateCache:

@Override
@Nullable
public CacheScheduler.VersionedCache generateCache(
    final JdbcExtractionNamespace namespace,
    final CacheScheduler.EntryImpl<JdbcExtractionNamespace> entryId,
    final String lastVersion,
    final CacheScheduler scheduler
) {
    final long lastCheck = lastVersion == null ? JodaUtils.MIN_INSTANT : Long.parseLong(lastVersion);
    final Long lastDBUpdate;
    final long dbQueryStart;
    try {
        lastDBUpdate = lastUpdates(entryId, namespace);
        if (lastDBUpdate != null && lastDBUpdate <= lastCheck) {
            return null;
        }
    } catch (UnableToObtainConnectionException e) {
        if (e.getMessage().contains(NO_SUITABLE_DRIVER_FOUND_ERROR)) {
            throw new ISE(e, JDBC_DRIVER_JAR_FILES_MISSING_ERROR);
        } else {
            throw e;
        }
    }
    dbQueryStart = System.currentTimeMillis();
    LOG.debug("Updating %s", entryId);
    final String newVersion;
    if (lastDBUpdate != null) {
        newVersion = lastDBUpdate.toString();
    } else {
        newVersion = StringUtils.format("%d", dbQueryStart);
    }
    final CacheScheduler.VersionedCache versionedCache = scheduler.createVersionedCache(entryId, newVersion);
    final long startNs = System.nanoTime();
    try (Handle handle = getHandle(entryId, namespace);
        ResultIterator<Pair<String, String>> pairs = getLookupPairs(handle, namespace)) {
        final Map<String, String> cache = versionedCache.getCache();
        final MapPopulator.PopulateResult populateResult = MapPopulator.populateAndWarnAtByteLimit(
            pairs,
            cache,
            (long) (MAX_MEMORY * namespace.getMaxHeapPercentage() / 100.0),
            null == entryId ? null : entryId.toString()
        );
        final long duration = System.nanoTime() - startNs;
        LOG.info("Finished loading %,d values (%d bytes) for [%s] in %,d ns", populateResult.getEntries(), populateResult.getBytes(), entryId, duration);
        return versionedCache;
    } catch (UnableToObtainConnectionException e) {
        if (e.getMessage().contains(NO_SUITABLE_DRIVER_FOUND_ERROR)) {
            throw new ISE(e, JDBC_DRIVER_JAR_FILES_MISSING_ERROR);
        } else {
            throw e;
        }
    } catch (Throwable t) {
        try {
            versionedCache.close();
        } catch (Exception e) {
            t.addSuppressed(e);
        }
        throw t;
    }
}
Also used : UnableToObtainConnectionException(org.skife.jdbi.v2.exceptions.UnableToObtainConnectionException) Handle(org.skife.jdbi.v2.Handle) ISE(org.apache.druid.java.util.common.ISE) CacheScheduler(org.apache.druid.server.lookup.namespace.cache.CacheScheduler) Pair(org.apache.druid.java.util.common.Pair) MapPopulator(org.apache.druid.data.input.MapPopulator) Nullable(javax.annotation.Nullable)
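
The catch (Throwable t) block above is the close-on-failure idiom: the versioned cache is closed only when population fails, and a secondary failure from close() is attached to the root cause via addSuppressed rather than masking it. A minimal sketch under invented names (Resource, acquireAndFill), not Druid classes:

public class CloseOnFailureSketch {

    static class Resource implements AutoCloseable {
        @Override
        public void close() {
            System.out.println("closed");
        }
    }

    static Resource acquireAndFill() {
        Resource resource = new Resource();
        try {
            // ... populate the resource; on success it is returned still open ...
            return resource;
        } catch (Throwable t) {
            try {
                resource.close();
            } catch (Exception e) {
                // Keep the original error primary; the close failure rides along.
                t.addSuppressed(e);
            }
            throw t;
        }
    }

    public static void main(String[] args) {
        acquireAndFill();
    }
}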

Example 50 with Handle

use of org.skife.jdbi.v2.Handle in project druid by druid-io.

From the class JdbcExtractionNamespaceTest, the method setup:

@Before
public void setup() throws Exception {
    lifecycle = new Lifecycle();
    updates = new AtomicLong(0L);
    updateLock = new ReentrantLock(true);
    closer = Closer.create();
    setupTeardownService = MoreExecutors.listeningDecorator(Execs.multiThreaded(2, "JDBCExtractionNamespaceTeardown--%s"));
    final ListenableFuture<Handle> setupFuture = setupTeardownService.submit(new Callable<Handle>() {

        @Override
        public Handle call() {
            final Handle handle = derbyConnectorRule.getConnector().getDBI().open();
            Assert.assertEquals(
                0,
                handle.createStatement(StringUtils.format(
                    "CREATE TABLE %s (%s TIMESTAMP, %s VARCHAR(64), %s VARCHAR(64), %s VARCHAR(64))",
                    TABLE_NAME, TS_COLUMN, FILTER_COLUMN, KEY_NAME, VAL_NAME
                )).setQueryTimeout(1).execute()
            );
            handle.createStatement(StringUtils.format("TRUNCATE TABLE %s", TABLE_NAME)).setQueryTimeout(1).execute();
            handle.commit();
            closer.register(new Closeable() {

                @Override
                public void close() throws IOException {
                    handle.createStatement("DROP TABLE " + TABLE_NAME).setQueryTimeout(1).execute();
                    final ListenableFuture future = setupTeardownService.submit(new Runnable() {

                        @Override
                        public void run() {
                            handle.close();
                        }
                    });
                    try (Closeable closeable = new Closeable() {

                        @Override
                        public void close() {
                            future.cancel(true);
                        }
                    }) {
                        future.get(10, TimeUnit.SECONDS);
                    } catch (InterruptedException | ExecutionException | TimeoutException e) {
                        throw new IOException("Error closing handle", e);
                    }
                }
            });
            closer.register(new Closeable() {

                @Override
                public void close() {
                    if (scheduler == null) {
                        return;
                    }
                    Assert.assertEquals(0, scheduler.getActiveEntries());
                }
            });
            for (Map.Entry<String, String[]> entry : RENAMES.entrySet()) {
                try {
                    String key = entry.getKey();
                    String value = entry.getValue()[0];
                    String filter = entry.getValue()[1];
                    insertValues(handle, key, value, filter, "2015-01-01 00:00:00");
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e);
                }
            }
            NoopServiceEmitter noopServiceEmitter = new NoopServiceEmitter();
            scheduler = new CacheScheduler(
                noopServiceEmitter,
                ImmutableMap.of(JdbcExtractionNamespace.class, new CacheGenerator<JdbcExtractionNamespace>() {

                private final JdbcCacheGenerator delegate = new JdbcCacheGenerator();

                @Override
                public CacheScheduler.VersionedCache generateCache(
                    final JdbcExtractionNamespace namespace,
                    final CacheScheduler.EntryImpl<JdbcExtractionNamespace> id,
                    final String lastVersion,
                    final CacheScheduler scheduler
                ) throws InterruptedException {
                    updateLock.lockInterruptibly();
                    try {
                        log.debug("Running cache generator");
                        try {
                            return delegate.generateCache(namespace, id, lastVersion, scheduler);
                        } finally {
                            updates.incrementAndGet();
                        }
                    } finally {
                        updateLock.unlock();
                    }
                }
            }), new OnHeapNamespaceExtractionCacheManager(lifecycle, noopServiceEmitter, new NamespaceExtractionConfig()));
            try {
                lifecycle.start();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
            closer.register(new Closeable() {

                @Override
                public void close() throws IOException {
                    final ListenableFuture future = setupTeardownService.submit(new Runnable() {

                        @Override
                        public void run() {
                            lifecycle.stop();
                        }
                    });
                    try (final Closeable closeable = new Closeable() {

                        @Override
                        public void close() {
                            future.cancel(true);
                        }
                    }) {
                        future.get(30, TimeUnit.SECONDS);
                    } catch (InterruptedException | ExecutionException | TimeoutException e) {
                        throw new IOException("Error stopping lifecycle", e);
                    }
                }
            });
            return handle;
        }
    });
    try (final Closeable ignore = () -> setupFuture.cancel(true)) {
        handleRef = setupFuture.get(10, TimeUnit.SECONDS);
    }
    Assert.assertNotNull(handleRef);
}
Also used : JdbcCacheGenerator(org.apache.druid.server.lookup.namespace.JdbcCacheGenerator) CacheGenerator(org.apache.druid.query.lookup.namespace.CacheGenerator) Closeable(java.io.Closeable) ExecutionException(java.util.concurrent.ExecutionException) TimeoutException(java.util.concurrent.TimeoutException) ReentrantLock(java.util.concurrent.locks.ReentrantLock) NamespaceExtractionConfig(org.apache.druid.server.lookup.namespace.NamespaceExtractionConfig) Lifecycle(org.apache.druid.java.util.common.lifecycle.Lifecycle) NoopServiceEmitter(org.apache.druid.server.metrics.NoopServiceEmitter) IOException(java.io.IOException) Handle(org.skife.jdbi.v2.Handle) AtomicLong(java.util.concurrent.atomic.AtomicLong) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) JdbcExtractionNamespace(org.apache.druid.query.lookup.namespace.JdbcExtractionNamespace) Before(org.junit.Before)
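
Teardown in this setup method hinges on Guava's Closer, which closes registered resources in reverse registration order, so the lifecycle stop registered last runs before the handle cleanup registered first. A small sketch of that LIFO behavior, independent of the test:

import java.io.IOException;
import com.google.common.io.Closer;

public class CloserSketch {
    public static void main(String[] args) throws IOException {
        Closer closer = Closer.create();
        // Registration order mirrors the test: handle cleanup, scheduler check, lifecycle stop.
        closer.register(() -> System.out.println("drop table, close handle"));
        closer.register(() -> System.out.println("check scheduler entries"));
        closer.register(() -> System.out.println("stop lifecycle"));
        // close() runs the three in reverse order: lifecycle stop first, handle cleanup last.
        closer.close();
    }
}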

Aggregations

Handle (org.skife.jdbi.v2.Handle): 103
DBI (org.skife.jdbi.v2.DBI): 28
Before (org.junit.Before): 21
IOException (java.io.IOException): 18
List (java.util.List): 17
DataSourceFactory (io.dropwizard.db.DataSourceFactory): 15
DBIFactory (io.dropwizard.jdbi.DBIFactory): 15
SQLException (java.sql.SQLException): 15
Map (java.util.Map): 14
Test (org.junit.Test): 14
Test (org.testng.annotations.Test): 14
DateTime (org.joda.time.DateTime): 13
ArrayList (java.util.ArrayList): 11
TransactionStatus (org.skife.jdbi.v2.TransactionStatus): 11
ResultSet (java.sql.ResultSet): 10
ImmutableList (com.google.common.collect.ImmutableList): 8
UUID (java.util.UUID): 8
CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException): 7
ImmutableSet (com.google.common.collect.ImmutableSet): 6
Set (java.util.Set): 6