Example 71 with Handle

use of org.skife.jdbi.v2.Handle in project druid by druid-io.

In the class IndexerSQLMetadataStorageCoordinator, the method allocatePendingSegment:

@Override
public SegmentIdentifier allocatePendingSegment(final String dataSource, final String sequenceName, final String previousSegmentId, final Interval interval, final String maxVersion) throws IOException {
    Preconditions.checkNotNull(dataSource, "dataSource");
    Preconditions.checkNotNull(sequenceName, "sequenceName");
    Preconditions.checkNotNull(interval, "interval");
    Preconditions.checkNotNull(maxVersion, "maxVersion");
    final String previousSegmentIdNotNull = previousSegmentId == null ? "" : previousSegmentId;
    return connector.retryTransaction(new TransactionCallback<SegmentIdentifier>() {

        @Override
        public SegmentIdentifier inTransaction(Handle handle, TransactionStatus transactionStatus) throws Exception {
            final List<byte[]> existingBytes = handle.createQuery(
                String.format(
                    "SELECT payload FROM %s WHERE "
                    + "dataSource = :dataSource AND "
                    + "sequence_name = :sequence_name AND "
                    + "sequence_prev_id = :sequence_prev_id",
                    dbTables.getPendingSegmentsTable()
                )
            ).bind("dataSource", dataSource)
             .bind("sequence_name", sequenceName)
             .bind("sequence_prev_id", previousSegmentIdNotNull)
             .map(ByteArrayMapper.FIRST)
             .list();
            if (!existingBytes.isEmpty()) {
                final SegmentIdentifier existingIdentifier = jsonMapper.readValue(Iterables.getOnlyElement(existingBytes), SegmentIdentifier.class);
                if (existingIdentifier.getInterval().getStartMillis() == interval.getStartMillis() && existingIdentifier.getInterval().getEndMillis() == interval.getEndMillis()) {
                    log.info("Found existing pending segment [%s] for sequence[%s] (previous = [%s]) in DB", existingIdentifier.getIdentifierAsString(), sequenceName, previousSegmentIdNotNull);
                    return existingIdentifier;
                } else {
                    log.warn("Cannot use existing pending segment [%s] for sequence[%s] (previous = [%s]) in DB, " + "does not match requested interval[%s]", existingIdentifier.getIdentifierAsString(), sequenceName, previousSegmentIdNotNull, interval);
                    return null;
                }
            }
            // Make up a pending segment based on existing segments and pending segments in the DB. This works
            // assuming that all tasks inserting segments at a particular point in time are going through the
            // allocatePendingSegment flow. This should be assured through some other mechanism (like task locks).
            final SegmentIdentifier newIdentifier;
            final List<TimelineObjectHolder<String, DataSegment>> existingChunks = getTimelineForIntervalsWithHandle(handle, dataSource, ImmutableList.of(interval)).lookup(interval);
            if (existingChunks.size() > 1) {
                // Not possible to expand more than one chunk with a single segment.
                log.warn("Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: already have [%,d] chunks.", dataSource, interval, maxVersion, existingChunks.size());
                return null;
            } else {
                SegmentIdentifier max = null;
                if (!existingChunks.isEmpty()) {
                    TimelineObjectHolder<String, DataSegment> existingHolder = Iterables.getOnlyElement(existingChunks);
                    for (PartitionChunk<DataSegment> existing : existingHolder.getObject()) {
                        if (max == null || max.getShardSpec().getPartitionNum() < existing.getObject().getShardSpec().getPartitionNum()) {
                            max = SegmentIdentifier.fromDataSegment(existing.getObject());
                        }
                    }
                }
                final List<SegmentIdentifier> pendings = getPendingSegmentsForIntervalWithHandle(handle, dataSource, interval);
                for (SegmentIdentifier pending : pendings) {
                    if (max == null
                        || pending.getVersion().compareTo(max.getVersion()) > 0
                        || (pending.getVersion().equals(max.getVersion())
                            && pending.getShardSpec().getPartitionNum() > max.getShardSpec().getPartitionNum())) {
                        max = pending;
                    }
                }
                if (max == null) {
                    newIdentifier = new SegmentIdentifier(dataSource, interval, maxVersion, new NumberedShardSpec(0, 0));
                } else if (!max.getInterval().equals(interval) || max.getVersion().compareTo(maxVersion) > 0) {
                    log.warn("Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: conflicting segment[%s].", dataSource, interval, maxVersion, max.getIdentifierAsString());
                    return null;
                } else if (max.getShardSpec() instanceof LinearShardSpec) {
                    newIdentifier = new SegmentIdentifier(
                        dataSource,
                        max.getInterval(),
                        max.getVersion(),
                        new LinearShardSpec(max.getShardSpec().getPartitionNum() + 1)
                    );
                } else if (max.getShardSpec() instanceof NumberedShardSpec) {
                    newIdentifier = new SegmentIdentifier(
                        dataSource,
                        max.getInterval(),
                        max.getVersion(),
                        new NumberedShardSpec(
                            max.getShardSpec().getPartitionNum() + 1,
                            ((NumberedShardSpec) max.getShardSpec()).getPartitions()
                        )
                    );
                } else {
                    log.warn("Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: ShardSpec class[%s] used by [%s].", dataSource, interval, maxVersion, max.getShardSpec().getClass(), max.getIdentifierAsString());
                    return null;
                }
            }
            // SELECT -> INSERT can fail due to races; callers must be prepared to retry.
            // Avoiding ON DUPLICATE KEY since it's not portable.
            // Avoiding try/catch since it may cause inadvertent transaction-splitting.
            // UNIQUE key for the row, ensuring sequences do not fork in two directions.
            // Using a single column instead of (sequence_name, sequence_prev_id) as some MySQL storage engines
            // have difficulty with large unique keys (see https://github.com/druid-io/druid/issues/2319)
            final String sequenceNamePrevIdSha1 = BaseEncoding.base16().encode(
                Hashing.sha1()
                       .newHasher()
                       .putBytes(StringUtils.toUtf8(sequenceName))
                       .putByte((byte) 0xff)
                       .putBytes(StringUtils.toUtf8(previousSegmentIdNotNull))
                       .hash()
                       .asBytes()
            );
            handle.createStatement(
                String.format(
                    "INSERT INTO %1$s (id, dataSource, created_date, start, %2$send%2$s, "
                    + "sequence_name, sequence_prev_id, sequence_name_prev_id_sha1, payload) "
                    + "VALUES (:id, :dataSource, :created_date, :start, :end, "
                    + ":sequence_name, :sequence_prev_id, :sequence_name_prev_id_sha1, :payload)",
                    dbTables.getPendingSegmentsTable(),
                    connector.getQuoteString()
                )
            )
                .bind("id", newIdentifier.getIdentifierAsString())
                .bind("dataSource", dataSource)
                .bind("created_date", new DateTime().toString())
                .bind("start", interval.getStart().toString())
                .bind("end", interval.getEnd().toString())
                .bind("sequence_name", sequenceName)
                .bind("sequence_prev_id", previousSegmentIdNotNull)
                .bind("sequence_name_prev_id_sha1", sequenceNamePrevIdSha1)
                .bind("payload", jsonMapper.writeValueAsBytes(newIdentifier))
                .execute();
            log.info("Allocated pending segment [%s] for sequence[%s] (previous = [%s]) in DB", newIdentifier.getIdentifierAsString(), sequenceName, previousSegmentIdNotNull);
            return newIdentifier;
        }
    }, ALLOCATE_SEGMENT_QUIET_TRIES, SQLMetadataConnector.DEFAULT_MAX_TRIES);
}
Also used : SegmentIdentifier(io.druid.segment.realtime.appenderator.SegmentIdentifier) LinearShardSpec(io.druid.timeline.partition.LinearShardSpec) TransactionStatus(org.skife.jdbi.v2.TransactionStatus) DataSegment(io.druid.timeline.DataSegment) SQLException(java.sql.SQLException) IOException(java.io.IOException) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException) DateTime(org.joda.time.DateTime) Handle(org.skife.jdbi.v2.Handle) TimelineObjectHolder(io.druid.timeline.TimelineObjectHolder) List(java.util.List) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) PartitionChunk(io.druid.timeline.partition.PartitionChunk) NumberedShardSpec(io.druid.timeline.partition.NumberedShardSpec)
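
For readers unfamiliar with the callback seen above: connector.retryTransaction() is Druid's retry wrapper around a plain JDBI v2 transaction. A minimal sketch of the underlying JDBI pattern, assuming a hypothetical pending_segments table and id column, might look like this:

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.TransactionCallback;
import org.skife.jdbi.v2.TransactionStatus;
import org.skife.jdbi.v2.util.StringMapper;

public class TransactionSketch {

    public String selectPayload(final DBI dbi, final String id) {
        return dbi.inTransaction(new TransactionCallback<String>() {

            @Override
            public String inTransaction(Handle handle, TransactionStatus status) {
                // Every statement issued through this Handle runs in one transaction;
                // throwing from the callback rolls the whole transaction back.
                // "pending_segments" is an illustrative table name, not Druid's.
                return handle.createQuery("SELECT payload FROM pending_segments WHERE id = :id")
                             .bind("id", id)
                             .map(StringMapper.FIRST)
                             .first();
            }
        });
    }
}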

Example 72 with Handle

use of org.skife.jdbi.v2.Handle in project Singularity by HubSpot.

In the class SingularityHistoryTest, the method blowDBAway:

@After
public void blowDBAway() {
    Handle handle = dbiProvider.get().open();
    handle.execute("DELETE FROM taskHistory;DELETE FROM requestHistory;DELETE FROM deployHistory;");
    handle.close();
}
Also used : Handle(org.skife.jdbi.v2.Handle) After(org.junit.After)
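
Since org.skife.jdbi.v2.Handle implements Closeable (examples 74 and 75 below rely on this too), the same teardown can be written with try-with-resources so the Handle is released even when a DELETE fails. Splitting the semicolon-joined string into separate execute() calls is a sketch-level choice, as multi-statement strings only work when the driver allows them:

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;

public class CleanupSketch {

    public void blowDBAway(DBI dbi) {
        try (Handle handle = dbi.open()) {
            // One statement per table; the Handle closes even if a DELETE throws.
            handle.execute("DELETE FROM taskHistory");
            handle.execute("DELETE FROM requestHistory");
            handle.execute("DELETE FROM deployHistory");
        }
    }
}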

Example 73 with Handle

use of org.skife.jdbi.v2.Handle in project syndesis by syndesisio.

In the class SqlFileStore, the method doReadPostgres:

/**
 * Postgres does not allow reading from a large object after the connection has been closed.
 */
private InputStream doReadPostgres(String path) {
    Handle h = dbi.open();
    try {
        h.getConnection().setAutoCommit(false);
        List<Map<String, Object>> res = h.select("SELECT data FROM filestore WHERE path=?", path);
        Optional<Long> oid = res.stream().map(row -> row.get("data")).map(Long.class::cast).findFirst();
        if (oid.isPresent()) {
            LargeObjectManager lobj = getPostgresConnection(h.getConnection()).getLargeObjectAPI();
            LargeObject obj = lobj.open(oid.get(), LargeObjectManager.READ);
            return new HandleCloserInputStream(h, obj.getInputStream());
        } else {
            h.close();
            return null;
        }
    } catch (SQLException e) {
        IOUtils.closeQuietly(h);
        throw DaoException.launderThrowable(e);
    }
}
Also used : LargeObjectManager(org.postgresql.largeobject.LargeObjectManager) SQLException(java.sql.SQLException) LargeObject(org.postgresql.largeobject.LargeObject) Map(java.util.Map) Handle(org.skife.jdbi.v2.Handle)
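
HandleCloserInputStream is syndesis' own helper and its exact source is not shown here; a plausible minimal sketch is a FilterInputStream that closes the Handle only when the caller finishes reading, which is what lets doReadPostgres hand out a stream backed by a still-open connection:

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.skife.jdbi.v2.Handle;

// Sketch only: the real syndesis class may differ.
public class HandleCloserInputStreamSketch extends FilterInputStream {

    private final Handle handle;

    public HandleCloserInputStreamSketch(Handle handle, InputStream in) {
        super(in);
        this.handle = handle;
    }

    @Override
    public void close() throws IOException {
        try {
            // Close the underlying large-object stream first...
            super.close();
        } finally {
            // ...then release the Handle (and with it the connection).
            handle.close();
        }
    }
}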

Example 74 with Handle

use of org.skife.jdbi.v2.Handle in project syndesis by syndesisio.

In the class SqlJsonDB, the method withGlobalTransaction:

@Override
public void withGlobalTransaction(final Consumer<JsonDB> handler) {
    final String checkpoint = UUID.randomUUID().toString();
    try (Handle handle = dbi.open()) {
        handle.begin();
        handle.checkpoint(checkpoint);
        try (Connection connection = handle.getConnection();
            Connection transacted = withoutTransactionControl(connection)) {
            final TransactedEventBus transactedBus = new TransactedEventBus(bus);
            final SqlJsonDB checkpointed = new SqlJsonDB(new DBI(() -> transacted), transactedBus);
            boolean committed = false;
            try {
                handler.accept(checkpointed);
                handle.commit();
                committed = true;
                transactedBus.commit();
            } catch (@SuppressWarnings("PMD.AvoidCatchingGenericException") final RuntimeException e) {
                if (!committed) {
                    // if event bus blows up we can't rollback as the transaction has already been committed
                    handle.rollback(checkpoint);
                }
                throw SyndesisServerException.launderThrowable(e);
            }
        } catch (SQLException e) {
            throw SyndesisServerException.launderThrowable(e);
        }
    }
}
Also used : TransactedEventBus(io.syndesis.common.util.TransactedEventBus) SQLException(java.sql.SQLException) Connection(java.sql.Connection) DBI(org.skife.jdbi.v2.DBI) Handle(org.skife.jdbi.v2.Handle)
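
The checkpoint/rollback calls above are JDBI v2's savepoint API: rollback(name) undoes only the work performed after checkpoint(name), leaving earlier work in the transaction intact. A minimal sketch, using an illustrative jsondb table:

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;

public class CheckpointSketch {

    public void insertWithSavepoint(DBI dbi) {
        try (Handle handle = dbi.open()) {
            handle.begin();
            handle.execute("INSERT INTO jsondb (path) VALUES (?)", "/a");
            // Set a savepoint after the first insert.
            handle.checkpoint("cp");
            try {
                handle.execute("INSERT INTO jsondb (path) VALUES (?)", "/b");
            } catch (RuntimeException e) {
                // Undo only the second insert; "/a" survives.
                handle.rollback("cp");
            }
            handle.commit();
        }
    }
}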

Example 75 with Handle

use of org.skife.jdbi.v2.Handle in project SpinalTap by airbnb.

In the class LatestMysqlSchemaStore, the method getLatest:

@Override
public Map<String, MysqlTableSchema> getLatest(@NotNull final String database) {
    List<ColumnInfo> allColumnInfo;
    try (Handle handle = jdbi.open()) {
        allColumnInfo = MysqlSchemaUtil.LIST_COLUMNINFO_RETRYER.call(
            () -> handle.createQuery(ALL_TABLE_SCHEMA_QUERY)
                        .bind("db", database)
                        .map(MysqlSchemaUtil.COLUMN_MAPPER)
                        .list());
    } catch (Exception ex) {
        log.error(String.format("Failed to fetch schema for database: %s", database), ex);
        Throwables.throwIfUnchecked(ex);
        throw new RuntimeException(ex);
    }
    Map<String, MysqlTableSchema> allTableSchemaMap = Maps.newHashMap();
    allColumnInfo.forEach(columnInfo -> {
        String table = columnInfo.getTable();
        allTableSchemaMap
            .computeIfAbsent(table, __ -> MysqlSchemaUtil.createTableSchema(
                source, database, table, getTableDDL(database, table), Lists.newArrayList()))
            .getColumnInfo()
            .add(columnInfo);
    });
    return allTableSchemaMap;
}
Also used : SQLException(java.sql.SQLException) Handle(org.skife.jdbi.v2.Handle)
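
MysqlSchemaUtil.COLUMN_MAPPER above is SpinalTap's own mapper; a minimal sketch of such a JDBI v2 ResultSetMapper, mapping each information_schema row to a plain String array (the selected column names are assumptions for illustration), could look like:

import java.sql.ResultSet;
import java.sql.SQLException;
import org.skife.jdbi.v2.StatementContext;
import org.skife.jdbi.v2.tweak.ResultSetMapper;

public class ColumnInfoMapperSketch implements ResultSetMapper<String[]> {

    @Override
    public String[] map(int index, ResultSet r, StatementContext ctx) throws SQLException {
        // One row per table column: table name, column name, column type.
        return new String[] {
            r.getString("TABLE_NAME"),
            r.getString("COLUMN_NAME"),
            r.getString("COLUMN_TYPE")
        };
    }
}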

Aggregations

Handle (org.skife.jdbi.v2.Handle): 103
DBI (org.skife.jdbi.v2.DBI): 28
Before (org.junit.Before): 21
IOException (java.io.IOException): 18
List (java.util.List): 17
DataSourceFactory (io.dropwizard.db.DataSourceFactory): 15
DBIFactory (io.dropwizard.jdbi.DBIFactory): 15
SQLException (java.sql.SQLException): 15
Map (java.util.Map): 14
Test (org.junit.Test): 14
Test (org.testng.annotations.Test): 14
DateTime (org.joda.time.DateTime): 13
ArrayList (java.util.ArrayList): 11
TransactionStatus (org.skife.jdbi.v2.TransactionStatus): 11
ResultSet (java.sql.ResultSet): 10
ImmutableList (com.google.common.collect.ImmutableList): 8
UUID (java.util.UUID): 8
CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException): 7
ImmutableSet (com.google.common.collect.ImmutableSet): 6
Set (java.util.Set): 6