Use of org.apache.druid.java.util.common.ISE in the druid project (druid-io).
From the class LimitRequestsFilterTest, method createAndStartRequestThread:
/**
 * Spawns a thread that runs the given filter against the request/response pair and returns a latch
 * that the associated {@code TestFilterChain} counts down when the chain is reached.
 */
private CountDownLatch createAndStartRequestThread(LimitRequestsFilter filter, ServletRequest req, HttpServletResponse resp) {
final CountDownLatch completionLatch = new CountDownLatch(1);
final Runnable invokeFilter = () -> {
try {
filter.doFilter(req, resp, new TestFilterChain(completionLatch));
} catch (Exception e) {
// NOTE(review): this is thrown on the spawned thread, so it only reaches the
// thread's uncaught-exception handler rather than the test thread.
throw new ISE(e, "exception");
}
};
new Thread(invokeFilter).start();
return completionLatch;
}
Use of org.apache.druid.java.util.common.ISE in the druid project (druid-io).
From the class IndexerSQLMetadataStorageCoordinator, method announceHistoricalSegmentBatch:
/**
 * Attempts to insert a batch of segments into the database. Segments whose ids already exist in the DB
 * are skipped; however, this existence check is imperfect (a concurrent writer can insert between the
 * SELECT and the INSERT), so callers must be prepared to retry their entire transaction on exceptions.
 *
 * @param handle       open JDBI handle participating in the caller's transaction
 * @param segments     segments to announce
 * @param usedSegments the subset of {@code segments} whose "used" flag should be set to true
 *
 * @return the set of segments actually inserted (those not already present in the DB)
 */
private Set<DataSegment> announceHistoricalSegmentBatch(final Handle handle, final Set<DataSegment> segments, final Set<DataSegment> usedSegments) throws IOException {
final Set<DataSegment> toInsertSegments = new HashSet<>();
try {
// Pre-filter: look up which of the requested segment ids are already stored.
Set<String> existedSegments = segmentExistsBatch(handle, segments);
log.info("Found these segments already exist in DB: %s", existedSegments);
for (DataSegment segment : segments) {
if (!existedSegments.contains(segment.getId().toString())) {
toInsertSegments.add(segment);
}
}
// SELECT -> INSERT can fail due to races; callers must be prepared to retry.
// Avoiding ON DUPLICATE KEY since it's not portable.
// Avoiding try/catch since it may cause inadvertent transaction-splitting.
// Insert in bounded chunks so a single batch statement never grows unboundedly.
final List<List<DataSegment>> partitionedSegments = Lists.partition(new ArrayList<>(toInsertSegments), MAX_NUM_SEGMENTS_TO_ANNOUNCE_AT_ONCE);
// "end" is quoted via the connector's quote string because it is a reserved word on some databases.
PreparedBatch preparedBatch = handle.prepareBatch(StringUtils.format("INSERT INTO %1$s (id, dataSource, created_date, start, %2$send%2$s, partitioned, version, used, payload) " + "VALUES (:id, :dataSource, :created_date, :start, :end, :partitioned, :version, :used, :payload)", dbTables.getSegmentsTable(), connector.getQuoteString()));
for (List<DataSegment> partition : partitionedSegments) {
for (DataSegment segment : partition) {
preparedBatch.add().bind("id", segment.getId().toString()).bind("dataSource", segment.getDataSource()).bind("created_date", DateTimes.nowUtc().toString()).bind("start", segment.getInterval().getStart().toString()).bind("end", segment.getInterval().getEnd().toString()).bind("partitioned", (segment.getShardSpec() instanceof NoneShardSpec) ? false : true).bind("version", segment.getVersion()).bind("used", usedSegments.contains(segment)).bind("payload", jsonMapper.writeValueAsBytes(segment));
}
final int[] affectedRows = preparedBatch.execute();
// Every row in the chunk must report exactly one affected row, otherwise treat the chunk as failed.
final boolean succeeded = Arrays.stream(affectedRows).allMatch(eachAffectedRows -> eachAffectedRows == 1);
if (succeeded) {
log.infoSegments(partition, "Published segments to DB");
} else {
final List<DataSegment> failedToPublish = IntStream.range(0, partition.size()).filter(i -> affectedRows[i] != 1).mapToObj(partition::get).collect(Collectors.toList());
throw new ISE("Failed to publish segments to DB: %s", SegmentUtils.commaSeparatedIdentifiers(failedToPublish));
}
}
} catch (Exception e) {
// Log which segments were involved, then rethrow so the caller's transaction can retry/abort.
log.errorSegments(segments, "Exception inserting segments");
throw e;
}
return toInsertSegments;
}
Use of org.apache.druid.java.util.common.ISE in the druid project (druid-io).
From the class CuratorDruidNodeDiscoveryProvider, method stop:
@LifecycleStop
public void stop() throws IOException {
// Refuse to stop unless the lifecycle state machine permits the transition.
if (!lifecycleLock.canStop()) {
throw new ISE("can't stop.");
}
log.debug("Stopping.");
// Fold every per-role watcher and discoverer into one Closer, then close them
// all together along with shutting down the listener executor.
final Closer resourceCloser = Closer.create();
resourceCloser.registerAll(nodeRoleWatchers.values());
resourceCloser.registerAll(nodeDiscoverers);
CloseableUtils.closeAll(resourceCloser, listenerExecutor::shutdownNow);
}
Use of org.apache.druid.java.util.common.ISE in the druid project (druid-io).
From the class DruidLeaderClient, method findCurrentLeader:
/**
 * Queries the leader-election endpoint and returns the current leader's URL.
 * On success the leader URL is also cached in {@code currentKnownLeader}.
 *
 * @throws ISE if the request fails, the response is non-200, or the returned URL is malformed
 */
public String findCurrentLeader() {
Preconditions.checkState(lifecycleLock.awaitStarted(1, TimeUnit.MILLISECONDS));
StringFullResponseHolder leaderResponse;
try {
leaderResponse = go(makeRequest(HttpMethod.GET, leaderRequestPath));
} catch (Exception ex) {
throw new ISE(ex, "Couldn't find leader.");
}
if (leaderResponse.getStatus().getCode() == 200) {
final String leaderUrl = leaderResponse.getContent();
try {
// Parse the returned string to validate it is a well-formed URL before caching it.
final URL parsedUrl = new URL(leaderUrl);
currentKnownLeader.set(leaderUrl);
return parsedUrl.toString();
} catch (MalformedURLException ex) {
// Fall through to the failure ISE below after logging the bad URL.
log.error(ex, "Received malformed leader url[%s].", leaderUrl);
}
}
throw new ISE("Couldn't find leader, failed response status is [%s] and content [%s].", leaderResponse.getStatus().getCode(), leaderResponse.getContent());
}
Use of org.apache.druid.java.util.common.ISE in the druid project (druid-io).
From the class QueryableIndexStorageAdapter, method makeVectorCursor:
/**
 * Builds a vectorized cursor over this index for the given filter/interval/virtual columns.
 * Callers must check {@code canVectorize} first; returns null when the requested interval
 * does not overlap the data.
 */
@Override
@Nullable
public VectorCursor makeVectorCursor(@Nullable final Filter filter, final Interval interval, final VirtualColumns virtualColumns, final boolean descending, final int vectorSize, @Nullable final QueryMetrics<?> queryMetrics) {
if (!canVectorize(filter, virtualColumns, descending)) {
throw new ISE("Cannot vectorize. Check 'canVectorize' before calling 'makeVectorCursor'.");
}
if (queryMetrics != null) {
queryMetrics.vectorized(true);
}
// Clamp the requested interval to the data actually present; nothing to scan means no cursor.
final Interval clampedInterval = computeCursorInterval(Granularities.ALL, interval);
if (clampedInterval == null) {
return null;
}
final ColumnSelectorBitmapIndexSelector indexSelector = makeBitmapIndexSelector(virtualColumns);
final FilterAnalysis analysis = analyzeFilter(filter, indexSelector, queryMetrics);
// Non-positive vectorSize means "use the default".
final int effectiveVectorSize = vectorSize > 0 ? vectorSize : DEFAULT_VECTOR_SIZE;
return new QueryableIndexCursorSequenceBuilder(
    index,
    clampedInterval,
    virtualColumns,
    analysis.getPreFilterBitmap(),
    getMinTime().getMillis(),
    getMaxTime().getMillis(),
    descending,
    analysis.getPostFilter(),
    indexSelector
).buildVectorized(effectiveVectorSize);
}
Aggregations