
Example 6 with Transaction

use of org.skife.jdbi.v2.sqlobject.Transaction in project irontest by zheng-wang.

the class UtilsDAO method getTestcaseDataTable.

/**
 * @param testcaseId ID of the test case whose data table is to be fetched
 * @param fetchFirstRowOnly if true, only the first data table row (if it exists) will be fetched; if false, all rows will be fetched.
 * @return the test case's data table; an empty DataTable if the test case has no data table columns
 */
@Transaction
public DataTable getTestcaseDataTable(long testcaseId, boolean fetchFirstRowOnly) {
    DataTable dataTable = new DataTable();
    List<DataTableColumn> columns = dataTableColumnDAO().findByTestcaseId(testcaseId);
    // populate the data table rows Java model column by column
    List<LinkedHashMap<String, Object>> rows = new ArrayList<>();
    for (DataTableColumn column : columns) {
        List<DataTableCell> columnCells = dataTableCellDAO().findByColumnId(column.getId());
        for (DataTableCell columnCell : columnCells) {
            short rowSequence = columnCell.getRowSequence();
            // rowSequence is 1-based; grow the row list on demand
            if (rows.size() < rowSequence) {
                rows.add(new LinkedHashMap<String, Object>());
            }
            // a cell holds either a literal value or a reference to an endpoint
            Object cellObject;
            if (columnCell.getValue() != null) {
                cellObject = columnCell.getValue();
            } else {
                cellObject = endpointDAO().findById(columnCell.getEndpointId());
            }
            rows.get(rowSequence - 1).put(column.getName(), cellObject);
            // when only the first row is requested, move on to the next column once its first-row cell is populated
            if (fetchFirstRowOnly && rows.size() == 1) {
                break;
            }
        }
    }
    if (columns.size() > 0) {
        dataTable.setColumns(columns);
        dataTable.setRows(rows);
    }
    return dataTable;
}
Also used : DataTableCell(io.irontest.models.DataTableCell) DataTable(io.irontest.models.DataTable) DataTableColumn(io.irontest.models.DataTableColumn) ArrayList(java.util.ArrayList) CreateSqlObject(org.skife.jdbi.v2.sqlobject.CreateSqlObject) LinkedHashMap(java.util.LinkedHashMap) Transaction(org.skife.jdbi.v2.sqlobject.Transaction)
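
For context, the pattern above relies on the JDBI v2 SQL Object API: an abstract DAO class is materialized by DBI.onDemand(), @CreateSqlObject supplies child DAOs bound to the same handle, and @Transaction wraps the annotated method in a single database transaction. Below is a minimal, self-contained sketch of that wiring; the GameDAO/ScoreDAO names, the score table, and the H2 in-memory URL are illustrative assumptions, not part of the irontest project.

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.sqlobject.Bind;
import org.skife.jdbi.v2.sqlobject.CreateSqlObject;
import org.skife.jdbi.v2.sqlobject.SqlQuery;
import org.skife.jdbi.v2.sqlobject.SqlUpdate;
import org.skife.jdbi.v2.sqlobject.Transaction;

// Hypothetical child DAO; the score table and its columns exist only for this sketch.
interface ScoreDAO {
    @SqlUpdate("INSERT INTO score (player, points) VALUES (:player, :points)")
    void insert(@Bind("player") String player, @Bind("points") int points);

    @SqlQuery("SELECT COALESCE(SUM(points), 0) FROM score WHERE player = :player")
    int totalFor(@Bind("player") String player);
}

// Abstract parent DAO in the same shape as UtilsDAO above: @CreateSqlObject supplies a
// child DAO sharing the parent's handle, and @Transaction makes the whole method one transaction.
public abstract class GameDAO {

    @CreateSqlObject
    abstract ScoreDAO scoreDAO();

    @Transaction
    public int recordAndTotal(String player, int points) {
        scoreDAO().insert(player, points);
        return scoreDAO().totalFor(player);
    }

    public static void main(String[] args) {
        // Assumes the H2 driver is on the classpath; any JDBC URL would do.
        DBI dbi = new DBI("jdbc:h2:mem:demo;DB_CLOSE_DELAY=-1");
        try (Handle handle = dbi.open()) {
            handle.execute("CREATE TABLE score (player VARCHAR(64), points INT)");
        }
        GameDAO dao = dbi.onDemand(GameDAO.class);
        System.out.println(dao.recordAndTotal("alice", 10));
    }
}

Because the DAO comes from onDemand(), the insert and the query inside recordAndTotal run on one handle and either commit together or roll back together if anything throws.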

Example 7 with Transaction

use of org.skife.jdbi.v2.sqlobject.Transaction in project druid by druid-io.

the class IndexerSQLMetadataStorageCoordinator method allocatePendingSegment.

@Override
public SegmentIdentifier allocatePendingSegment(final String dataSource, final String sequenceName, final String previousSegmentId, final Interval interval, final String maxVersion) throws IOException {
    Preconditions.checkNotNull(dataSource, "dataSource");
    Preconditions.checkNotNull(sequenceName, "sequenceName");
    Preconditions.checkNotNull(interval, "interval");
    Preconditions.checkNotNull(maxVersion, "maxVersion");
    final String previousSegmentIdNotNull = previousSegmentId == null ? "" : previousSegmentId;
    return connector.retryTransaction(new TransactionCallback<SegmentIdentifier>() {

        @Override
        public SegmentIdentifier inTransaction(Handle handle, TransactionStatus transactionStatus) throws Exception {
            final List<byte[]> existingBytes = handle
                    .createQuery(
                            String.format(
                                    "SELECT payload FROM %s WHERE "
                                    + "dataSource = :dataSource AND "
                                    + "sequence_name = :sequence_name AND "
                                    + "sequence_prev_id = :sequence_prev_id",
                                    dbTables.getPendingSegmentsTable()))
                    .bind("dataSource", dataSource)
                    .bind("sequence_name", sequenceName)
                    .bind("sequence_prev_id", previousSegmentIdNotNull)
                    .map(ByteArrayMapper.FIRST)
                    .list();
            if (!existingBytes.isEmpty()) {
                final SegmentIdentifier existingIdentifier = jsonMapper.readValue(Iterables.getOnlyElement(existingBytes), SegmentIdentifier.class);
                if (existingIdentifier.getInterval().getStartMillis() == interval.getStartMillis() && existingIdentifier.getInterval().getEndMillis() == interval.getEndMillis()) {
                    log.info("Found existing pending segment [%s] for sequence[%s] (previous = [%s]) in DB", existingIdentifier.getIdentifierAsString(), sequenceName, previousSegmentIdNotNull);
                    return existingIdentifier;
                } else {
                    log.warn("Cannot use existing pending segment [%s] for sequence[%s] (previous = [%s]) in DB, " + "does not match requested interval[%s]", existingIdentifier.getIdentifierAsString(), sequenceName, previousSegmentIdNotNull, interval);
                    return null;
                }
            }
            // Make up a pending segment based on existing segments and pending segments in the DB. This works
            // assuming that all tasks inserting segments at a particular point in time are going through the
            // allocatePendingSegment flow. This should be assured through some other mechanism (like task locks).
            final SegmentIdentifier newIdentifier;
            final List<TimelineObjectHolder<String, DataSegment>> existingChunks = getTimelineForIntervalsWithHandle(handle, dataSource, ImmutableList.of(interval)).lookup(interval);
            if (existingChunks.size() > 1) {
                // Not possible to expand more than one chunk with a single segment.
                log.warn("Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: already have [%,d] chunks.", dataSource, interval, maxVersion, existingChunks.size());
                return null;
            } else {
                SegmentIdentifier max = null;
                if (!existingChunks.isEmpty()) {
                    TimelineObjectHolder<String, DataSegment> existingHolder = Iterables.getOnlyElement(existingChunks);
                    for (PartitionChunk<DataSegment> existing : existingHolder.getObject()) {
                        if (max == null || max.getShardSpec().getPartitionNum() < existing.getObject().getShardSpec().getPartitionNum()) {
                            max = SegmentIdentifier.fromDataSegment(existing.getObject());
                        }
                    }
                }
                final List<SegmentIdentifier> pendings = getPendingSegmentsForIntervalWithHandle(handle, dataSource, interval);
                for (SegmentIdentifier pending : pendings) {
                    if (max == null || pending.getVersion().compareTo(max.getVersion()) > 0 || (pending.getVersion().equals(max.getVersion()) && pending.getShardSpec().getPartitionNum() > max.getShardSpec().getPartitionNum())) {
                        max = pending;
                    }
                }
                if (max == null) {
                    newIdentifier = new SegmentIdentifier(dataSource, interval, maxVersion, new NumberedShardSpec(0, 0));
                } else if (!max.getInterval().equals(interval) || max.getVersion().compareTo(maxVersion) > 0) {
                    log.warn("Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: conflicting segment[%s].", dataSource, interval, maxVersion, max.getIdentifierAsString());
                    return null;
                } else if (max.getShardSpec() instanceof LinearShardSpec) {
                    newIdentifier = new SegmentIdentifier(dataSource, max.getInterval(), max.getVersion(), new LinearShardSpec(max.getShardSpec().getPartitionNum() + 1));
                } else if (max.getShardSpec() instanceof NumberedShardSpec) {
                    newIdentifier = new SegmentIdentifier(dataSource, max.getInterval(), max.getVersion(), new NumberedShardSpec(max.getShardSpec().getPartitionNum() + 1, ((NumberedShardSpec) max.getShardSpec()).getPartitions()));
                } else {
                    log.warn("Cannot allocate new segment for dataSource[%s], interval[%s], maxVersion[%s]: ShardSpec class[%s] used by [%s].", dataSource, interval, maxVersion, max.getShardSpec().getClass(), max.getIdentifierAsString());
                    return null;
                }
            }
            // SELECT -> INSERT can fail due to races; callers must be prepared to retry.
            // Avoiding ON DUPLICATE KEY since it's not portable.
            // Avoiding try/catch since it may cause inadvertent transaction-splitting.
            // UNIQUE key for the row, ensuring sequences do not fork in two directions.
            // Using a single column instead of (sequence_name, sequence_prev_id) as some MySQL storage engines
            // have difficulty with large unique keys (see https://github.com/druid-io/druid/issues/2319)
            final String sequenceNamePrevIdSha1 = BaseEncoding.base16().encode(
                    Hashing.sha1()
                           .newHasher()
                           .putBytes(StringUtils.toUtf8(sequenceName))
                           .putByte((byte) 0xff)
                           .putBytes(StringUtils.toUtf8(previousSegmentIdNotNull))
                           .hash()
                           .asBytes());
            handle.createStatement(
                    String.format(
                            "INSERT INTO %1$s (id, dataSource, created_date, start, %2$send%2$s, "
                            + "sequence_name, sequence_prev_id, sequence_name_prev_id_sha1, payload) "
                            + "VALUES (:id, :dataSource, :created_date, :start, :end, "
                            + ":sequence_name, :sequence_prev_id, :sequence_name_prev_id_sha1, :payload)",
                            dbTables.getPendingSegmentsTable(),
                            connector.getQuoteString()))
                  .bind("id", newIdentifier.getIdentifierAsString())
                  .bind("dataSource", dataSource)
                  .bind("created_date", new DateTime().toString())
                  .bind("start", interval.getStart().toString())
                  .bind("end", interval.getEnd().toString())
                  .bind("sequence_name", sequenceName)
                  .bind("sequence_prev_id", previousSegmentIdNotNull)
                  .bind("sequence_name_prev_id_sha1", sequenceNamePrevIdSha1)
                  .bind("payload", jsonMapper.writeValueAsBytes(newIdentifier))
                  .execute();
            log.info("Allocated pending segment [%s] for sequence[%s] (previous = [%s]) in DB", newIdentifier.getIdentifierAsString(), sequenceName, previousSegmentIdNotNull);
            return newIdentifier;
        }
    }, ALLOCATE_SEGMENT_QUIET_TRIES, SQLMetadataConnector.DEFAULT_MAX_TRIES);
}
Also used : SegmentIdentifier(io.druid.segment.realtime.appenderator.SegmentIdentifier) LinearShardSpec(io.druid.timeline.partition.LinearShardSpec) TransactionStatus(org.skife.jdbi.v2.TransactionStatus) DataSegment(io.druid.timeline.DataSegment) SQLException(java.sql.SQLException) IOException(java.io.IOException) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException) DateTime(org.joda.time.DateTime) Handle(org.skife.jdbi.v2.Handle) TimelineObjectHolder(io.druid.timeline.TimelineObjectHolder) List(java.util.List) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) PartitionChunk(io.druid.timeline.partition.PartitionChunk) NumberedShardSpec(io.druid.timeline.partition.NumberedShardSpec)
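
The sequence_name_prev_id_sha1 column above is a fixed-width stand-in for the (sequence_name, sequence_prev_id) pair, kept narrow so the UNIQUE key stays small. Here is a rough JDK-only sketch of the same derivation, without the Guava Hashing/BaseEncoding and Druid StringUtils helpers; the example ids in main are made up.

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class SequenceKeySketch {

    // SHA-1 over "sequenceName || 0xff || previousSegmentId", hex-encoded.
    // 0xff never occurs in UTF-8 text, so it cleanly separates the two parts.
    static String sequenceNamePrevIdSha1(String sequenceName, String previousSegmentId)
            throws NoSuchAlgorithmException {
        String prev = previousSegmentId == null ? "" : previousSegmentId;
        MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
        sha1.update(sequenceName.getBytes(StandardCharsets.UTF_8));
        sha1.update((byte) 0xff);
        sha1.update(prev.getBytes(StandardCharsets.UTF_8));
        StringBuilder hex = new StringBuilder();
        for (byte b : sha1.digest()) {
            hex.append(String.format("%02X", b));
        }
        return hex.toString();
    }

    public static void main(String[] args) throws NoSuchAlgorithmException {
        // Always 40 hex characters, no matter how long the segment id grows.
        System.out.println(sequenceNamePrevIdSha1("index_kafka_wiki_sequence_0", "wiki_2017-01-01T00:00:00.000Z_v1_3"));
    }
}

As the inline comments in allocatePendingSegment note, the SELECT followed by INSERT can still race with a concurrent caller, so callers retry through connector.retryTransaction rather than relying on ON DUPLICATE KEY, which is not portable across metadata stores.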

Example 8 with Transaction

use of org.skife.jdbi.v2.sqlobject.Transaction in project presto by prestodb.

the class DatabaseShardManager method dropTable.

@Override
public void dropTable(long tableId) {
    runTransaction(dbi, (handle, status) -> {
        lockTable(handle, tableId);
        ShardDao shardDao = shardDaoSupplier.attach(handle);
        shardDao.insertDeletedShards(tableId);
        shardDao.dropShardNodes(tableId);
        shardDao.dropShards(tableId);
        handle.attach(ShardOrganizerDao.class).dropOrganizerJobs(tableId);
        MetadataDao dao = handle.attach(MetadataDao.class);
        dao.dropColumns(tableId);
        dao.dropTable(tableId);
        return null;
    });
    // It is not possible to drop the index tables in a transaction: DROP TABLE is DDL,
    // which most databases auto-commit rather than run transactionally.
    try (Handle handle = dbi.open()) {
        handle.execute("DROP TABLE " + shardIndexTable(tableId));
    } catch (DBIException e) {
        log.warn(e, "Failed to drop index table %s", shardIndexTable(tableId));
    }
}
Also used : ShardOrganizerDao(com.facebook.presto.raptor.storage.organization.ShardOrganizerDao) DBIException(org.skife.jdbi.v2.exceptions.DBIException) RaptorColumnHandle(com.facebook.presto.raptor.RaptorColumnHandle) Handle(org.skife.jdbi.v2.Handle)
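
runTransaction here is a thin Raptor helper; with plain JDBI v2 the same shape can be written against DBI.inTransaction, which takes a TransactionCallback (a lambda over Handle and TransactionStatus). A minimal sketch, assuming an H2 in-memory database and made-up parent/child tables in place of Raptor's metadata and shard tables:

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;

public class DropTableSketch {

    public static void main(String[] args) {
        // Assumes the H2 driver is on the classpath; tables and ids are hypothetical.
        DBI dbi = new DBI("jdbc:h2:mem:drop_demo;DB_CLOSE_DELAY=-1");
        try (Handle setup = dbi.open()) {
            setup.execute("CREATE TABLE parent (id BIGINT PRIMARY KEY)");
            setup.execute("CREATE TABLE child (parent_id BIGINT)");
            setup.execute("INSERT INTO parent (id) VALUES (1)");
            setup.execute("INSERT INTO child (parent_id) VALUES (1)");
        }

        final long id = 1;
        // Metadata cleanup runs as one transaction: both deletes commit or roll back together.
        dbi.inTransaction((handle, status) -> {
            handle.execute("DELETE FROM child WHERE parent_id = ?", id);
            handle.execute("DELETE FROM parent WHERE id = ?", id);
            return null;
        });

        // DDL is auto-committed by most databases, so it is issued outside the transaction.
        try (Handle handle = dbi.open()) {
            handle.execute("DROP TABLE child");
        }
    }
}

Keeping the DROP TABLE outside the callback mirrors dropTable above: the metadata deletes succeed or fail as a unit, while the non-transactional DDL is attempted afterwards and merely logged on failure.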

Example 9 with Transaction

use of org.skife.jdbi.v2.sqlobject.Transaction in project hive by apache.

the class DruidStorageHandler method loadDruidSegments.

protected void loadDruidSegments(Table table, boolean overwrite) throws MetaException {
    // at this point we have Druid segments from reducers but we need to atomically
    // rename and commit to metadata
    final String dataSourceName = table.getParameters().get(Constants.DRUID_DATA_SOURCE);
    final List<DataSegment> segmentList = Lists.newArrayList();
    final Path tableDir = getSegmentDescriptorDir();
    // Read the created segments metadata from the table staging directory
    try {
        segmentList.addAll(DruidStorageHandlerUtils.getCreatedSegments(tableDir, getConf()));
    } catch (IOException e) {
        LOG.error("Failed to load segments descriptor from directory {}", tableDir.toString());
        // clean the staging directory before rethrowing; Throwables.propagate() never returns,
        // so any statement placed after it would not execute
        cleanWorkingDir();
        Throwables.propagate(e);
    }
    // Moving Druid segments and committing to druid metadata as one transaction.
    final HdfsDataSegmentPusherConfig hdfsSegmentPusherConfig = new HdfsDataSegmentPusherConfig();
    List<DataSegment> publishedDataSegmentList = Lists.newArrayList();
    final String segmentDirectory =
            table.getParameters().get(Constants.DRUID_SEGMENT_DIRECTORY) != null
                    ? table.getParameters().get(Constants.DRUID_SEGMENT_DIRECTORY)
                    : HiveConf.getVar(getConf(), HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY);
    LOG.info(String.format("Moving [%s] Druid segments from staging directory [%s] to Deep storage [%s]", segmentList.size(), getStagingWorkingDir(), segmentDirectory));
    hdfsSegmentPusherConfig.setStorageDirectory(segmentDirectory);
    try {
        DataSegmentPusher dataSegmentPusher = new HdfsDataSegmentPusher(hdfsSegmentPusherConfig, getConf(), DruidStorageHandlerUtils.JSON_MAPPER);
        publishedDataSegmentList = DruidStorageHandlerUtils.publishSegmentsAndCommit(getConnector(), getDruidMetadataStorageTablesConfig(), dataSourceName, segmentList, overwrite, getConf(), dataSegmentPusher);
    } catch (CallbackFailedException | IOException e) {
        LOG.error("Failed to move segments from staging directory");
        if (e instanceof CallbackFailedException) {
            Throwables.propagate(e.getCause());
        }
        Throwables.propagate(e);
    } finally {
        cleanWorkingDir();
    }
    checkLoadStatus(publishedDataSegmentList);
}
Also used : Path(org.apache.hadoop.fs.Path) DataSegmentPusher(io.druid.segment.loading.DataSegmentPusher) HdfsDataSegmentPusher(io.druid.storage.hdfs.HdfsDataSegmentPusher) HdfsDataSegmentPusherConfig(io.druid.storage.hdfs.HdfsDataSegmentPusherConfig) IOException(java.io.IOException) DataSegment(io.druid.timeline.DataSegment) HdfsDataSegmentPusher(io.druid.storage.hdfs.HdfsDataSegmentPusher) CallbackFailedException(org.skife.jdbi.v2.exceptions.CallbackFailedException)
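
publishSegmentsAndCommit ultimately runs inside a JDBI transaction callback, and JDBI v2 wraps whatever the callback throws in CallbackFailedException; that is why the catch block above re-propagates e.getCause() for that case. A contrived sketch of the unwrap pattern (the failure is simulated; assumes the H2 driver on the classpath):

import java.io.IOException;

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.exceptions.CallbackFailedException;

public class UnwrapCallbackSketch {

    public static void main(String[] args) {
        DBI dbi = new DBI("jdbc:h2:mem:unwrap_demo");
        try {
            dbi.inTransaction((handle, status) -> {
                // Simulate a metadata commit step failing inside the transaction callback.
                throw new IOException("simulated metadata commit failure");
            });
        } catch (CallbackFailedException e) {
            // JDBI delivers the wrapper; the interesting exception is the cause,
            // which is what loadDruidSegments propagates.
            System.out.println("root cause: " + e.getCause());
        }
    }
}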

Aggregations

IOException (java.io.IOException): 6
DataSegment (io.druid.timeline.DataSegment): 5
SQLException (java.sql.SQLException): 5
Handle (org.skife.jdbi.v2.Handle): 5
CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException): 5
ArrayList (java.util.ArrayList): 4
DateTime (org.joda.time.DateTime): 4
List (java.util.List): 3
TransactionStatus (org.skife.jdbi.v2.TransactionStatus): 3
ImmutableList (com.google.common.collect.ImmutableList): 2
NoneShardSpec (io.druid.timeline.partition.NoneShardSpec): 2
Set (java.util.Set): 2
StatementContext (org.skife.jdbi.v2.StatementContext): 2
RaptorColumnHandle (com.facebook.presto.raptor.RaptorColumnHandle): 1
ShardOrganizerDao (com.facebook.presto.raptor.storage.organization.ShardOrganizerDao): 1
InjectableValues (com.fasterxml.jackson.databind.InjectableValues): 1
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 1
NamedType (com.fasterxml.jackson.databind.jsontype.NamedType): 1
SmileFactory (com.fasterxml.jackson.dataformat.smile.SmileFactory): 1
Throwables (com.google.common.base.Throwables): 1