Use of org.skife.jdbi.v2.Handle in project druid by druid-io.
From the class IndexerSQLMetadataStorageCoordinatorTest, the method testDropSegmentsWithHandleForSegmentThatExist:
@Test
public void testDropSegmentsWithHandleForSegmentThatExist()
{
  try (Handle handle = derbyConnector.getDBI().open()) {
    Assert.assertTrue(insertUsedSegments(ImmutableSet.of(defaultSegment)));
    List<String> usedSegments = retrieveUsedSegmentIds();
    Assert.assertEquals(1, usedSegments.size());
    Assert.assertEquals(defaultSegment.getId().toString(), usedSegments.get(0));

    // Try to drop the segment
    IndexerSQLMetadataStorageCoordinator.DataStoreMetadataUpdateResult result =
        coordinator.dropSegmentsWithHandle(handle, ImmutableSet.of(defaultSegment), defaultSegment.getDataSource());
    Assert.assertEquals(IndexerSQLMetadataStorageCoordinator.DataStoreMetadataUpdateResult.SUCCESS, result);

    usedSegments = retrieveUsedSegmentIds();
    Assert.assertEquals(0, usedSegments.size());
  }
}
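The insertUsedSegments and retrieveUsedSegmentIds helpers are defined elsewhere in the test class and are not shown on this page. A minimal sketch of what such helpers could look like against a JDBI v2 Handle follows; the druid_segments table name and column layout are illustrative assumptions, not the test's actual fixture:

import java.util.List;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.util.StringMapper;

// Hypothetical helpers, sketched for illustration only. The real test derives
// its table name from the Derby connector rule and serializes DataSegment payloads.
class SegmentTableHelperSketch
{
  private static final String SEGMENTS_TABLE = "druid_segments"; // assumed name

  static boolean insertUsedSegment(Handle handle, String id, String dataSource, byte[] payload)
  {
    int rows = handle
        .createStatement(
            "INSERT INTO " + SEGMENTS_TABLE
            + " (id, dataSource, used, payload) VALUES (:id, :dataSource, true, :payload)")
        .bind("id", id)
        .bind("dataSource", dataSource)
        .bind("payload", payload)
        .execute();
    return rows == 1;
  }

  static List<String> retrieveUsedSegmentIds(Handle handle)
  {
    return handle
        .createQuery("SELECT id FROM " + SEGMENTS_TABLE + " WHERE used = true")
        .map(StringMapper.FIRST)
        .list();
  }
}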
Use of org.skife.jdbi.v2.Handle in project druid by druid-io.
From the class DerivativeDataSourceManager, the method updateDerivatives:
private void updateDerivatives()
{
  List<Pair<String, DerivativeDataSourceMetadata>> derivativesInDatabase = connector.retryWithHandle(
      handle -> handle
          .createQuery(StringUtils.format("SELECT DISTINCT dataSource,commit_metadata_payload FROM %1$s", dbTables.get().getDataSourceTable()))
          .map((int index, ResultSet r, StatementContext ctx) -> {
            String datasourceName = r.getString("dataSource");
            DataSourceMetadata payload = JacksonUtils.readValue(objectMapper, r.getBytes("commit_metadata_payload"), DataSourceMetadata.class);
            if (!(payload instanceof DerivativeDataSourceMetadata)) {
              return null;
            }
            DerivativeDataSourceMetadata metadata = (DerivativeDataSourceMetadata) payload;
            return new Pair<>(datasourceName, metadata);
          })
          .list()
  );

  List<DerivativeDataSource> derivativeDataSources = derivativesInDatabase
      .parallelStream()
      .filter(data -> data != null)
      .map(derivatives -> {
        String name = derivatives.lhs;
        DerivativeDataSourceMetadata metadata = derivatives.rhs;
        String baseDataSource = metadata.getBaseDataSource();
        long avgSizePerGranularity = getAvgSizePerGranularity(name);
        log.info(
            "Found derivatives: {bases=%s, derivative=%s, dimensions=%s, metrics=%s, avgSize=%s}",
            baseDataSource, name, metadata.getDimensions(), metadata.getMetrics(), avgSizePerGranularity
        );
        return new DerivativeDataSource(name, baseDataSource, metadata.getColumns(), avgSizePerGranularity);
      })
      .filter(derivatives -> derivatives.getAvgSizeBasedGranularity() > 0)
      .collect(Collectors.toList());

  ConcurrentHashMap<String, SortedSet<DerivativeDataSource>> newDerivatives = new ConcurrentHashMap<>();
  for (DerivativeDataSource derivative : derivativeDataSources) {
    newDerivatives.computeIfAbsent(derivative.getBaseDataSource(), ds -> new TreeSet<>()).add(derivative);
  }

  // Atomically publish the rebuilt view of base -> derivative datasources.
  ConcurrentHashMap<String, SortedSet<DerivativeDataSource>> current;
  do {
    current = DERIVATIVES_REF.get();
  } while (!DERIVATIVES_REF.compareAndSet(current, newDerivatives));
}
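The do/while at the end publishes the freshly built map: it reads the current reference and swaps in the new one via compareAndSet. A self-contained illustration of the same pattern (plain Java, nothing Druid-specific) might look like this:

import java.util.Collections;
import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

// Standalone sketch of the publish pattern above: build a snapshot off to the
// side, then swap it into an AtomicReference so readers always see a complete view.
class SnapshotPublisher<K, V>
{
  private final AtomicReference<Map<K, V>> ref = new AtomicReference<>(Collections.emptyMap());

  void publish(Map<K, V> newSnapshot)
  {
    Map<K, V> current;
    do {
      current = ref.get();
    } while (!ref.compareAndSet(current, newSnapshot));
  }

  Map<K, V> snapshot()
  {
    return ref.get();
  }
}

Note that because newDerivatives does not depend on the value being replaced, a plain DERIVATIVES_REF.set(newDerivatives) would behave identically here; the CAS loop is the general form for updates that do read the previous value.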
Use of org.skife.jdbi.v2.Handle in project druid by druid-io.
From the class DerivativeDataSourceManager, the method getAvgSizePerGranularity:
/**
 * Calculates the average data size per segment granularity for a given datasource.
 *
 * For example, suppose a datasource has 5 segments as follows:
 * interval = "2018-04-01/2018-04-02", segment size = 1024 * 1024 * 2
 * interval = "2018-04-01/2018-04-02", segment size = 1024 * 1024 * 2
 * interval = "2018-04-02/2018-04-03", segment size = 1024 * 1024 * 1
 * interval = "2018-04-02/2018-04-03", segment size = 1024 * 1024 * 1
 * interval = "2018-04-02/2018-04-03", segment size = 1024 * 1024 * 1
 * Then the number of distinct intervals is 2 and the total segment size is 1024 * 1024 * 7,
 * so the result is 1024 * 1024 * 7 / 2 = 1024 * 1024 * 3.5.
 *
 * @param datasource the name of the datasource to measure
 * @return average data size per segment granularity
 */
private long getAvgSizePerGranularity(String datasource)
{
  return connector.retryWithHandle(
      new HandleCallback<Long>()
      {
        Set<Interval> intervals = new HashSet<>();
        long totalSize = 0;

        @Override
        public Long withHandle(Handle handle)
        {
          handle.createQuery(
              StringUtils.format(
                  "SELECT start,%1$send%1$s,payload FROM %2$s WHERE used = true AND dataSource = :dataSource",
                  connector.getQuoteString(),
                  dbTables.get().getSegmentsTable()
              )
          )
                .bind("dataSource", datasource)
                .map((int index, ResultSet r, StatementContext ctx) -> {
                  intervals.add(Intervals.utc(DateTimes.of(r.getString("start")).getMillis(), DateTimes.of(r.getString("end")).getMillis()));
                  DataSegment segment = JacksonUtils.readValue(objectMapper, r.getBytes("payload"), DataSegment.class);
                  totalSize += segment.getSize();
                  return null;
                })
                .list();
          return intervals.isEmpty() ? 0L : totalSize / intervals.size();
        }
      }
  );
}
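A quick standalone check of the javadoc arithmetic, using only Joda-Time (which Druid already depends on); the HashSet deduplicates equal intervals exactly as the field in the callback above does:

import java.util.HashSet;
import java.util.Set;
import org.joda.time.Interval;

public class AvgSizeArithmeticCheck
{
  public static void main(String[] args)
  {
    long mib = 1024L * 1024;
    String day1 = "2018-04-01T00:00:00Z/2018-04-02T00:00:00Z";
    String day2 = "2018-04-02T00:00:00Z/2018-04-03T00:00:00Z";
    String[] segmentIntervals = {day1, day1, day2, day2, day2};
    long[] segmentSizes = {2 * mib, 2 * mib, mib, mib, mib};

    Set<Interval> intervals = new HashSet<>();
    long totalSize = 0;
    for (int i = 0; i < segmentIntervals.length; i++) {
      intervals.add(Interval.parse(segmentIntervals[i])); // duplicates collapse
      totalSize += segmentSizes[i];
    }
    // Long division, matching the method above: 7 MiB / 2 = 3670016 bytes.
    System.out.println(totalSize / intervals.size());
  }
}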
Use of org.skife.jdbi.v2.Handle in project druid by druid-io.
From the class JdbcCacheGenerator, the method generateCache:
@Override
@Nullable
public CacheScheduler.VersionedCache generateCache(
    final JdbcExtractionNamespace namespace,
    final CacheScheduler.EntryImpl<JdbcExtractionNamespace> entryId,
    final String lastVersion,
    final CacheScheduler scheduler
)
{
  final long lastCheck = lastVersion == null ? JodaUtils.MIN_INSTANT : Long.parseLong(lastVersion);
  final Long lastDBUpdate;
  final long dbQueryStart;

  try {
    lastDBUpdate = lastUpdates(entryId, namespace);
    if (lastDBUpdate != null && lastDBUpdate <= lastCheck) {
      return null;
    }
  } catch (UnableToObtainConnectionException e) {
    if (e.getMessage().contains(NO_SUITABLE_DRIVER_FOUND_ERROR)) {
      throw new ISE(e, JDBC_DRIVER_JAR_FILES_MISSING_ERROR);
    } else {
      throw e;
    }
  }

  dbQueryStart = System.currentTimeMillis();
  LOG.debug("Updating %s", entryId);

  final String newVersion;
  if (lastDBUpdate != null) {
    newVersion = lastDBUpdate.toString();
  } else {
    newVersion = StringUtils.format("%d", dbQueryStart);
  }
  final CacheScheduler.VersionedCache versionedCache = scheduler.createVersionedCache(entryId, newVersion);

  final long startNs = System.nanoTime();
  try (
      Handle handle = getHandle(entryId, namespace);
      ResultIterator<Pair<String, String>> pairs = getLookupPairs(handle, namespace)
  ) {
    final Map<String, String> cache = versionedCache.getCache();
    final MapPopulator.PopulateResult populateResult = MapPopulator.populateAndWarnAtByteLimit(
        pairs,
        cache,
        (long) (MAX_MEMORY * namespace.getMaxHeapPercentage() / 100.0),
        null == entryId ? null : entryId.toString()
    );
    final long duration = System.nanoTime() - startNs;
    LOG.info(
        "Finished loading %,d values (%d bytes) for [%s] in %,d ns",
        populateResult.getEntries(),
        populateResult.getBytes(),
        entryId,
        duration
    );
    return versionedCache;
  } catch (UnableToObtainConnectionException e) {
    if (e.getMessage().contains(NO_SUITABLE_DRIVER_FOUND_ERROR)) {
      throw new ISE(e, JDBC_DRIVER_JAR_FILES_MISSING_ERROR);
    } else {
      throw e;
    }
  } catch (Throwable t) {
    try {
      versionedCache.close();
    } catch (Exception e) {
      t.addSuppressed(e);
    }
    throw t;
  }
}
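getHandle and getLookupPairs are private helpers of JdbcCacheGenerator and are not reproduced on this page. As a rough, non-authoritative sketch, a lookup-pair query over JDBI v2 could stream rows like this, assuming the table, key, and value column names come from the namespace (the query shape is a guess, not the class's actual SQL):

import java.sql.ResultSet;
import org.apache.druid.java.util.common.Pair;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.ResultIterator;
import org.skife.jdbi.v2.StatementContext;

class LookupPairsSketch
{
  // Streams key/value pairs instead of materializing the whole table, which is
  // why the caller above can cap memory via MapPopulator.populateAndWarnAtByteLimit.
  static ResultIterator<Pair<String, String>> lookupPairs(Handle handle, String table, String keyColumn, String valueColumn)
  {
    return handle
        .createQuery(String.format("SELECT %s, %s FROM %s", keyColumn, valueColumn, table))
        .map((int index, ResultSet r, StatementContext ctx) -> new Pair<>(r.getString(1), r.getString(2)))
        .iterator();
  }
}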
Use of org.skife.jdbi.v2.Handle in project druid by druid-io.
From the class JdbcExtractionNamespaceTest, the method setup:
@Before
public void setup() throws Exception
{
  lifecycle = new Lifecycle();
  updates = new AtomicLong(0L);
  updateLock = new ReentrantLock(true);
  closer = Closer.create();
  setupTeardownService = MoreExecutors.listeningDecorator(Execs.multiThreaded(2, "JDBCExtractionNamespaceTeardown--%s"));

  final ListenableFuture<Handle> setupFuture = setupTeardownService.submit(
      new Callable<Handle>()
      {
        @Override
        public Handle call()
        {
          final Handle handle = derbyConnectorRule.getConnector().getDBI().open();
          Assert.assertEquals(
              0,
              handle.createStatement(
                  StringUtils.format(
                      "CREATE TABLE %s (%s TIMESTAMP, %s VARCHAR(64), %s VARCHAR(64), %s VARCHAR(64))",
                      TABLE_NAME, TS_COLUMN, FILTER_COLUMN, KEY_NAME, VAL_NAME
                  )
              ).setQueryTimeout(1).execute()
          );
          handle.createStatement(StringUtils.format("TRUNCATE TABLE %s", TABLE_NAME)).setQueryTimeout(1).execute();
          handle.commit();

          closer.register(new Closeable()
          {
            @Override
            public void close() throws IOException
            {
              handle.createStatement("DROP TABLE " + TABLE_NAME).setQueryTimeout(1).execute();
              final ListenableFuture future = setupTeardownService.submit(new Runnable()
              {
                @Override
                public void run()
                {
                  handle.close();
                }
              });
              try (Closeable closeable = new Closeable()
              {
                @Override
                public void close()
                {
                  future.cancel(true);
                }
              }) {
                future.get(10, TimeUnit.SECONDS);
              } catch (InterruptedException | ExecutionException | TimeoutException e) {
                throw new IOException("Error closing handle", e);
              }
            }
          });

          closer.register(new Closeable()
          {
            @Override
            public void close()
            {
              if (scheduler == null) {
                return;
              }
              Assert.assertEquals(0, scheduler.getActiveEntries());
            }
          });

          for (Map.Entry<String, String[]> entry : RENAMES.entrySet()) {
            try {
              String key = entry.getKey();
              String value = entry.getValue()[0];
              String filter = entry.getValue()[1];
              insertValues(handle, key, value, filter, "2015-01-01 00:00:00");
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
              throw new RuntimeException(e);
            }
          }

          NoopServiceEmitter noopServiceEmitter = new NoopServiceEmitter();
          scheduler = new CacheScheduler(
              noopServiceEmitter,
              ImmutableMap.of(
                  JdbcExtractionNamespace.class,
                  new CacheGenerator<JdbcExtractionNamespace>()
                  {
                    private final JdbcCacheGenerator delegate = new JdbcCacheGenerator();

                    @Override
                    public CacheScheduler.VersionedCache generateCache(
                        final JdbcExtractionNamespace namespace,
                        final CacheScheduler.EntryImpl<JdbcExtractionNamespace> id,
                        final String lastVersion,
                        final CacheScheduler scheduler
                    ) throws InterruptedException
                    {
                      updateLock.lockInterruptibly();
                      try {
                        log.debug("Running cache generator");
                        try {
                          return delegate.generateCache(namespace, id, lastVersion, scheduler);
                        } finally {
                          updates.incrementAndGet();
                        }
                      } finally {
                        updateLock.unlock();
                      }
                    }
                  }
              ),
              new OnHeapNamespaceExtractionCacheManager(lifecycle, noopServiceEmitter, new NamespaceExtractionConfig())
          );

          try {
            lifecycle.start();
          } catch (Exception e) {
            throw new RuntimeException(e);
          }

          closer.register(new Closeable()
          {
            @Override
            public void close() throws IOException
            {
              final ListenableFuture future = setupTeardownService.submit(new Runnable()
              {
                @Override
                public void run()
                {
                  lifecycle.stop();
                }
              });
              try (final Closeable closeable = new Closeable()
              {
                @Override
                public void close()
                {
                  future.cancel(true);
                }
              }) {
                future.get(30, TimeUnit.SECONDS);
              } catch (InterruptedException | ExecutionException | TimeoutException e) {
                throw new IOException("Error stopping lifecycle", e);
              }
            }
          });

          return handle;
        }
      }
  );

  try (final Closeable ignore = () -> setupFuture.cancel(true)) {
    handleRef = setupFuture.get(10, TimeUnit.SECONDS);
  }
  Assert.assertNotNull(handleRef);
}