Use of org.skife.jdbi.v2.exceptions.CallbackFailedException in project druid by druid-io.
From the class IndexerSQLMetadataStorageCoordinator, method announceHistoricalSegments:
/**
 * {@inheritDoc}
 */
@Override
public SegmentPublishResult announceHistoricalSegments(
    final Set<DataSegment> segments,
    final DataSourceMetadata startMetadata,
    final DataSourceMetadata endMetadata
) throws IOException {
  if (segments.isEmpty()) {
    throw new IllegalArgumentException("segment set must not be empty");
  }
  final String dataSource = segments.iterator().next().getDataSource();
  for (DataSegment segment : segments) {
    if (!dataSource.equals(segment.getDataSource())) {
      throw new IllegalArgumentException("segments must all be from the same dataSource");
    }
  }
  if ((startMetadata == null && endMetadata != null) || (startMetadata != null && endMetadata == null)) {
    throw new IllegalArgumentException("start/end metadata pair must be either null or non-null");
  }
  // Find which segments are used (i.e. not overshadowed).
  final Set<DataSegment> usedSegments = Sets.newHashSet();
  for (TimelineObjectHolder<String, DataSegment> holder : VersionedIntervalTimeline.forSegments(segments).lookup(JodaUtils.ETERNITY)) {
    for (PartitionChunk<DataSegment> chunk : holder.getObject()) {
      usedSegments.add(chunk.getObject());
    }
  }
  // Set inside the transaction callback when the rollback is deliberate (metadata mismatch),
  // so the catch block below can tell it apart from an unexpected failure.
  final AtomicBoolean txnFailure = new AtomicBoolean(false);
  try {
    return connector.retryTransaction(new TransactionCallback<SegmentPublishResult>() {
      @Override
      public SegmentPublishResult inTransaction(final Handle handle, final TransactionStatus transactionStatus) throws Exception {
        final Set<DataSegment> inserted = Sets.newHashSet();
        if (startMetadata != null) {
          final DataSourceMetadataUpdateResult result = updateDataSourceMetadataWithHandle(handle, dataSource, startMetadata, endMetadata);
          if (result != DataSourceMetadataUpdateResult.SUCCESS) {
            transactionStatus.setRollbackOnly();
            txnFailure.set(true);
            if (result == DataSourceMetadataUpdateResult.FAILURE) {
              throw new RuntimeException("Aborting transaction!");
            } else if (result == DataSourceMetadataUpdateResult.TRY_AGAIN) {
              throw new RetryTransactionException("Aborting transaction!");
            }
          }
        }
        for (final DataSegment segment : segments) {
          if (announceHistoricalSegment(handle, segment, usedSegments.contains(segment))) {
            inserted.add(segment);
          }
        }
        return new SegmentPublishResult(ImmutableSet.copyOf(inserted), true);
      }
    }, 3, SQLMetadataConnector.DEFAULT_MAX_TRIES);
  } catch (CallbackFailedException e) {
    // JDBI wraps any exception thrown from the callback in a CallbackFailedException.
    if (txnFailure.get()) {
      return new SegmentPublishResult(ImmutableSet.<DataSegment>of(), false);
    } else {
      throw e;
    }
  }
}
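The catch block works because JDBI wraps any exception thrown inside a transaction callback in a CallbackFailedException; the AtomicBoolean is what tells a deliberate metadata-mismatch rollback apart from an unexpected error, which is re-thrown. Below is a minimal sketch of that wrapping behavior against plain JDBI v2; the class name and the H2 in-memory JDBC URL are illustrative assumptions, not part of Druid.

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.TransactionCallback;
import org.skife.jdbi.v2.TransactionStatus;
import org.skife.jdbi.v2.exceptions.CallbackFailedException;

public class CallbackFailedExceptionSketch {
  public static void main(String[] args) {
    // Hypothetical in-memory database (assumes an H2 driver on the classpath);
    // any JDBC URL or DataSource accepted by DBI would do.
    final DBI dbi = new DBI("jdbc:h2:mem:sketch");
    try {
      dbi.inTransaction(new TransactionCallback<Void>() {
        @Override
        public Void inTransaction(Handle handle, TransactionStatus status) throws Exception {
          status.setRollbackOnly();
          // Any exception thrown here rolls back the transaction; JDBI re-throws it
          // to the caller wrapped in a CallbackFailedException.
          throw new RuntimeException("Aborting transaction!");
        }
      });
    } catch (CallbackFailedException e) {
      // The original exception is preserved as the cause.
      System.out.println("aborted: " + e.getCause().getMessage());
    }
  }
}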
Use of org.skife.jdbi.v2.exceptions.CallbackFailedException in project druid by druid-io.
From the class HadoopConverterJobTest, method setUp:
@Before
public void setUp() throws Exception {
  final MetadataStorageUpdaterJobSpec metadataStorageUpdaterJobSpec = new MetadataStorageUpdaterJobSpec() {
    @Override
    public String getSegmentTable() {
      return derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable();
    }

    @Override
    public MetadataStorageConnectorConfig get() {
      return derbyConnectorRule.getMetadataConnectorConfig();
    }
  };
  final File scratchFileDir = temporaryFolder.newFolder();
  storageLocProperty = System.getProperty(STORAGE_PROPERTY_KEY);
  tmpSegmentDir = temporaryFolder.newFolder();
  System.setProperty(STORAGE_PROPERTY_KEY, tmpSegmentDir.getAbsolutePath());
  final URL url = Preconditions.checkNotNull(Query.class.getClassLoader().getResource("druid.sample.tsv"));
  final File tmpInputFile = temporaryFolder.newFile();
  FileUtils.retryCopy(new ByteSource() {
    @Override
    public InputStream openStream() throws IOException {
      return url.openStream();
    }
  }, tmpInputFile, FileUtils.IS_EXCEPTION, 3);
  final HadoopDruidIndexerConfig hadoopDruidIndexerConfig = new HadoopDruidIndexerConfig(
      new HadoopIngestionSpec(
          new DataSchema(
              DATASOURCE,
              HadoopDruidIndexerConfig.JSON_MAPPER.convertValue(
                  new StringInputRowParser(
                      new DelimitedParseSpec(
                          new TimestampSpec("ts", "iso", null),
                          new DimensionsSpec(DimensionsSpec.getDefaultSchemas(Arrays.asList(TestIndex.DIMENSIONS)), null, null),
                          "\t",
                          "",
                          Arrays.asList(TestIndex.COLUMNS)
                      ),
                      null
                  ),
                  Map.class
              ),
              new AggregatorFactory[]{
                  new DoubleSumAggregatorFactory(TestIndex.METRICS[0], TestIndex.METRICS[0]),
                  new HyperUniquesAggregatorFactory("quality_uniques", "quality")
              },
              new UniformGranularitySpec(Granularities.MONTH, Granularities.DAY, ImmutableList.<Interval>of(interval)),
              HadoopDruidIndexerConfig.JSON_MAPPER
          ),
          new HadoopIOConfig(
              ImmutableMap.<String, Object>of("type", "static", "paths", tmpInputFile.getAbsolutePath()),
              metadataStorageUpdaterJobSpec,
              tmpSegmentDir.getAbsolutePath()
          ),
          new HadoopTuningConfig(
              scratchFileDir.getAbsolutePath(),
              null, null, null, null, null,
              false, false, false, false,
              null, false, false, null, null, null, false, false
          )
      )
  );
  metadataStorageTablesConfigSupplier = derbyConnectorRule.metadataTablesConfigSupplier();
  connector = derbyConnectorRule.getConnector();
  try {
    connector.getDBI().withHandle(new HandleCallback<Void>() {
      @Override
      public Void withHandle(Handle handle) throws Exception {
        handle.execute("DROP TABLE druid_segments");
        return null;
      }
    });
  } catch (CallbackFailedException e) {
    // Ignored: the table may not exist yet, and the failed DROP surfaces as a CallbackFailedException.
  }
  List<Jobby> jobs = ImmutableList.of(
      new Jobby() {
        @Override
        public boolean run() {
          connector.createSegmentTable(metadataStorageUpdaterJobSpec.getSegmentTable());
          return true;
        }
      },
      new HadoopDruidDetermineConfigurationJob(hadoopDruidIndexerConfig),
      new HadoopDruidIndexerJob(hadoopDruidIndexerConfig, new SQLMetadataStorageUpdaterJobHandler(connector))
  );
  JobHelper.runJobs(jobs, hadoopDruidIndexerConfig);
}
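The empty catch above swallows the CallbackFailedException raised when druid_segments does not exist yet. Below is a small sketch of the same idiom that logs the wrapped cause instead of discarding it silently; the DropTableQuietly class and its dropQuietly helper are hypothetical, not part of the Druid test code.

import org.skife.jdbi.v2.DBI;
import org.skife.jdbi.v2.Handle;
import org.skife.jdbi.v2.exceptions.CallbackFailedException;
import org.skife.jdbi.v2.tweak.HandleCallback;

public class DropTableQuietly {
  // Attempts to drop the given table; a failed DROP (e.g. the table is absent) is logged and ignored.
  public static void dropQuietly(final DBI dbi, final String table) {
    try {
      dbi.withHandle(new HandleCallback<Void>() {
        @Override
        public Void withHandle(Handle handle) throws Exception {
          handle.execute("DROP TABLE " + table);
          return null;
        }
      });
    } catch (CallbackFailedException e) {
      // JDBI wraps the underlying failure; keep the cause in the log instead of dropping it.
      System.err.println("Ignoring failed DROP of " + table + ": " + e.getCause());
    }
  }
}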