
Example 1 with CallbackFailedException

Use of org.skife.jdbi.v2.exceptions.CallbackFailedException in project druid by druid-io.

From the class IndexerSQLMetadataStorageCoordinator, method announceHistoricalSegments:

/**
   * {@inheritDoc}
   */
@Override
public SegmentPublishResult announceHistoricalSegments(final Set<DataSegment> segments, final DataSourceMetadata startMetadata, final DataSourceMetadata endMetadata) throws IOException {
    if (segments.isEmpty()) {
        throw new IllegalArgumentException("segment set must not be empty");
    }
    final String dataSource = segments.iterator().next().getDataSource();
    for (DataSegment segment : segments) {
        if (!dataSource.equals(segment.getDataSource())) {
            throw new IllegalArgumentException("segments must all be from the same dataSource");
        }
    }
    if ((startMetadata == null && endMetadata != null) || (startMetadata != null && endMetadata == null)) {
        throw new IllegalArgumentException("start/end metadata pair must be either null or non-null");
    }
    // Find which segments are used (i.e. not overshadowed).
    final Set<DataSegment> usedSegments = Sets.newHashSet();
    for (TimelineObjectHolder<String, DataSegment> holder : VersionedIntervalTimeline.forSegments(segments).lookup(JodaUtils.ETERNITY)) {
        for (PartitionChunk<DataSegment> chunk : holder.getObject()) {
            usedSegments.add(chunk.getObject());
        }
    }
    // Set to true when the metadata compare-and-swap below fails, so the catch block at the
    // bottom can tell that expected failure apart from an unexpected transaction error.
    final AtomicBoolean txnFailure = new AtomicBoolean(false);
    try {
        return connector.retryTransaction(new TransactionCallback<SegmentPublishResult>() {

            @Override
            public SegmentPublishResult inTransaction(final Handle handle, final TransactionStatus transactionStatus) throws Exception {
                final Set<DataSegment> inserted = Sets.newHashSet();
                if (startMetadata != null) {
                    final DataSourceMetadataUpdateResult result = updateDataSourceMetadataWithHandle(handle, dataSource, startMetadata, endMetadata);
                    if (result != DataSourceMetadataUpdateResult.SUCCESS) {
                        transactionStatus.setRollbackOnly();
                        txnFailure.set(true);
                        if (result == DataSourceMetadataUpdateResult.FAILURE) {
                            throw new RuntimeException("Aborting transaction!");
                        } else if (result == DataSourceMetadataUpdateResult.TRY_AGAIN) {
                            throw new RetryTransactionException("Aborting transaction!");
                        }
                    }
                }
                for (final DataSegment segment : segments) {
                    if (announceHistoricalSegment(handle, segment, usedSegments.contains(segment))) {
                        inserted.add(segment);
                    }
                }
                return new SegmentPublishResult(ImmutableSet.copyOf(inserted), true);
            }
        }, 3, SQLMetadataConnector.DEFAULT_MAX_TRIES);
    } catch (CallbackFailedException e) {
        // Exceptions thrown inside the transaction callback reach this point wrapped in CallbackFailedException.
        if (txnFailure.get()) {
            // The metadata update failed its check above; report an unsuccessful publish rather than rethrowing.
            return new SegmentPublishResult(ImmutableSet.<DataSegment>of(), false);
        } else {
            throw e;
        }
    }
}
Also used :
ImmutableSet (com.google.common.collect.ImmutableSet)
Set (java.util.Set)
TransactionStatus (org.skife.jdbi.v2.TransactionStatus)
DataSegment (io.druid.timeline.DataSegment)
SQLException (java.sql.SQLException)
IOException (java.io.IOException)
CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException)
Handle (org.skife.jdbi.v2.Handle)
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)
SegmentPublishResult (io.druid.indexing.overlord.SegmentPublishResult)
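
For context, the catch block above relies on a JDBI v2 guarantee: any exception thrown from a transaction callback reaches the caller wrapped in org.skife.jdbi.v2.exceptions.CallbackFailedException, which is unchecked. The callback therefore aborts by throwing, and the caller uses its own flag (txnFailure) to tell a deliberate abort from an unexpected error. The following is a minimal, self-contained sketch of that pattern, not Druid code; the DBI instance, the example_metadata table, and the bumpVersion method are invented for illustration.

import java.util.concurrent.atomic.AtomicBoolean;

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.TransactionCallback;
import org.skife.jdbi.v2.TransactionStatus;
import org.skife.jdbi.v2.exceptions.CallbackFailedException;

public class CompareAndSwapSketch {

    /**
     * Returns true if the compare-and-swap update succeeded, false if it failed its check.
     * Any other error propagates as the (unchecked) CallbackFailedException.
     */
    public static boolean bumpVersion(final DBI dbi, final String id, final long expectedVersion) {
        // Set inside the callback when the failure is expected, mirroring txnFailure above.
        final AtomicBoolean expectedFailure = new AtomicBoolean(false);
        try {
            return dbi.inTransaction(new TransactionCallback<Boolean>() {

                @Override
                public Boolean inTransaction(Handle handle, TransactionStatus status) throws Exception {
                    // Zero rows updated means the expected state was not found.
                    final int updated = handle.update("UPDATE example_metadata SET version = version + 1 WHERE id = ? AND version = ?", id, expectedVersion);
                    if (updated != 1) {
                        // Mark the failure as expected, roll back, and abort the callback by throwing.
                        expectedFailure.set(true);
                        status.setRollbackOnly();
                        throw new RuntimeException("Aborting transaction!");
                    }
                    return true;
                }
            });
        } catch (CallbackFailedException e) {
            if (expectedFailure.get()) {
                // Deliberate abort: report failure to the caller instead of rethrowing.
                return false;
            }
            // Anything else is unexpected and propagates to the caller.
            throw e;
        }
    }
}

If the caller needs the underlying error on the unexpected path, e.getCause() returns the exception originally thrown inside the callback; since CallbackFailedException extends RuntimeException, rethrowing it does not change the method signature.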

Example 2 with CallbackFailedException

Use of org.skife.jdbi.v2.exceptions.CallbackFailedException in project druid by druid-io.

From the class HadoopConverterJobTest, method setUp:

@Before
public void setUp() throws Exception {
    final MetadataStorageUpdaterJobSpec metadataStorageUpdaterJobSpec = new MetadataStorageUpdaterJobSpec() {

        @Override
        public String getSegmentTable() {
            return derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable();
        }

        @Override
        public MetadataStorageConnectorConfig get() {
            return derbyConnectorRule.getMetadataConnectorConfig();
        }
    };
    final File scratchFileDir = temporaryFolder.newFolder();
    storageLocProperty = System.getProperty(STORAGE_PROPERTY_KEY);
    tmpSegmentDir = temporaryFolder.newFolder();
    System.setProperty(STORAGE_PROPERTY_KEY, tmpSegmentDir.getAbsolutePath());
    final URL url = Preconditions.checkNotNull(Query.class.getClassLoader().getResource("druid.sample.tsv"));
    final File tmpInputFile = temporaryFolder.newFile();
    FileUtils.retryCopy(new ByteSource() {

        @Override
        public InputStream openStream() throws IOException {
            return url.openStream();
        }
    }, tmpInputFile, FileUtils.IS_EXCEPTION, 3);
    final HadoopDruidIndexerConfig hadoopDruidIndexerConfig = new HadoopDruidIndexerConfig(
        new HadoopIngestionSpec(
            new DataSchema(
                DATASOURCE,
                HadoopDruidIndexerConfig.JSON_MAPPER.convertValue(
                    new StringInputRowParser(
                        new DelimitedParseSpec(
                            new TimestampSpec("ts", "iso", null),
                            new DimensionsSpec(DimensionsSpec.getDefaultSchemas(Arrays.asList(TestIndex.DIMENSIONS)), null, null),
                            "\t",
                            "",
                            Arrays.asList(TestIndex.COLUMNS)
                        ),
                        null
                    ),
                    Map.class
                ),
                new AggregatorFactory[] {
                    new DoubleSumAggregatorFactory(TestIndex.METRICS[0], TestIndex.METRICS[0]),
                    new HyperUniquesAggregatorFactory("quality_uniques", "quality")
                },
                new UniformGranularitySpec(Granularities.MONTH, Granularities.DAY, ImmutableList.<Interval>of(interval)),
                HadoopDruidIndexerConfig.JSON_MAPPER
            ),
            new HadoopIOConfig(
                ImmutableMap.<String, Object>of("type", "static", "paths", tmpInputFile.getAbsolutePath()),
                metadataStorageUpdaterJobSpec,
                tmpSegmentDir.getAbsolutePath()
            ),
            new HadoopTuningConfig(scratchFileDir.getAbsolutePath(), null, null, null, null, null, false, false, false, false, null, false, false, null, null, null, false, false)
        )
    );
    metadataStorageTablesConfigSupplier = derbyConnectorRule.metadataTablesConfigSupplier();
    connector = derbyConnectorRule.getConnector();
    try {
        connector.getDBI().withHandle(new HandleCallback<Void>() {

            @Override
            public Void withHandle(Handle handle) throws Exception {
                handle.execute("DROP TABLE druid_segments");
                return null;
            }
        });
    } catch (CallbackFailedException e) {
        // Who cares: the segments table may not exist yet, so a failed DROP is safe to ignore in test setup.
    }
    List<Jobby> jobs = ImmutableList.of(new Jobby() {

        @Override
        public boolean run() {
            connector.createSegmentTable(metadataStorageUpdaterJobSpec.getSegmentTable());
            return true;
        }
    }, new HadoopDruidDetermineConfigurationJob(hadoopDruidIndexerConfig), new HadoopDruidIndexerJob(hadoopDruidIndexerConfig, new SQLMetadataStorageUpdaterJobHandler(connector)));
    JobHelper.runJobs(jobs, hadoopDruidIndexerConfig);
}
Also used :
HadoopIngestionSpec (io.druid.indexer.HadoopIngestionSpec)
HadoopTuningConfig (io.druid.indexer.HadoopTuningConfig)
URL (java.net.URL)
HadoopIOConfig (io.druid.indexer.HadoopIOConfig)
UniformGranularitySpec (io.druid.segment.indexing.granularity.UniformGranularitySpec)
TimestampSpec (io.druid.data.input.impl.TimestampSpec)
SQLMetadataStorageUpdaterJobHandler (io.druid.indexer.SQLMetadataStorageUpdaterJobHandler)
DoubleSumAggregatorFactory (io.druid.query.aggregation.DoubleSumAggregatorFactory)
InputStream (java.io.InputStream)
DelimitedParseSpec (io.druid.data.input.impl.DelimitedParseSpec)
IOException (java.io.IOException)
HadoopDruidIndexerConfig (io.druid.indexer.HadoopDruidIndexerConfig)
CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException)
Handle (org.skife.jdbi.v2.Handle)
DataSchema (io.druid.segment.indexing.DataSchema)
Jobby (io.druid.indexer.Jobby)
HadoopDruidIndexerJob (io.druid.indexer.HadoopDruidIndexerJob)
StringInputRowParser (io.druid.data.input.impl.StringInputRowParser)
HyperUniquesAggregatorFactory (io.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory)
ByteSource (com.google.common.io.ByteSource)
DimensionsSpec (io.druid.data.input.impl.DimensionsSpec)
File (java.io.File)
Map (java.util.Map)
ImmutableMap (com.google.common.collect.ImmutableMap)
HadoopDruidDetermineConfigurationJob (io.druid.indexer.HadoopDruidDetermineConfigurationJob)
Interval (org.joda.time.Interval)
Before (org.junit.Before)
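
The setUp method above uses the same wrapping behavior through withHandle: the DROP TABLE of a possibly absent druid_segments table throws inside the callback, surfaces as a CallbackFailedException, and is deliberately ignored so the test can recreate the table from scratch. Below is a minimal sketch of that idiom outside Druid; the in-memory JDBC URL and the example_segments table name are assumptions for illustration.

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.exceptions.CallbackFailedException;
import org.skife.jdbi.v2.tweak.HandleCallback;

public class DropTableIfPresentSketch {

    public static void dropQuietly(final DBI dbi, final String table) {
        try {
            dbi.withHandle(new HandleCallback<Void>() {

                @Override
                public Void withHandle(Handle handle) throws Exception {
                    // Throws if the table does not exist; JDBI wraps the failure in CallbackFailedException.
                    handle.execute("DROP TABLE " + table);
                    return null;
                }
            });
        } catch (CallbackFailedException e) {
            // The table was probably missing; ignoring the failure is acceptable for test setup.
        }
    }

    public static void main(String[] args) {
        // Hypothetical in-memory database purely for illustration.
        final DBI dbi = new DBI("jdbc:h2:mem:example;DB_CLOSE_DELAY=-1");
        dropQuietly(dbi, "example_segments");
    }
}

If only specific failures should be ignored, the wrapped cause is available via e.getCause(), for example to check for an SQLException before swallowing the exception.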

Aggregations

IOException (java.io.IOException): 2
Handle (org.skife.jdbi.v2.Handle): 2
CallbackFailedException (org.skife.jdbi.v2.exceptions.CallbackFailedException): 2
ImmutableMap (com.google.common.collect.ImmutableMap): 1
ImmutableSet (com.google.common.collect.ImmutableSet): 1
ByteSource (com.google.common.io.ByteSource): 1
DelimitedParseSpec (io.druid.data.input.impl.DelimitedParseSpec): 1
DimensionsSpec (io.druid.data.input.impl.DimensionsSpec): 1
StringInputRowParser (io.druid.data.input.impl.StringInputRowParser): 1
TimestampSpec (io.druid.data.input.impl.TimestampSpec): 1
HadoopDruidDetermineConfigurationJob (io.druid.indexer.HadoopDruidDetermineConfigurationJob): 1
HadoopDruidIndexerConfig (io.druid.indexer.HadoopDruidIndexerConfig): 1
HadoopDruidIndexerJob (io.druid.indexer.HadoopDruidIndexerJob): 1
HadoopIOConfig (io.druid.indexer.HadoopIOConfig): 1
HadoopIngestionSpec (io.druid.indexer.HadoopIngestionSpec): 1
HadoopTuningConfig (io.druid.indexer.HadoopTuningConfig): 1
Jobby (io.druid.indexer.Jobby): 1
SQLMetadataStorageUpdaterJobHandler (io.druid.indexer.SQLMetadataStorageUpdaterJobHandler): 1
SegmentPublishResult (io.druid.indexing.overlord.SegmentPublishResult): 1
DoubleSumAggregatorFactory (io.druid.query.aggregation.DoubleSumAggregatorFactory): 1