Use of org.apache.druid.java.util.common.ISE in the project druid by druid-io.
The class BaseFilterTest, method selectCountUsingVectorizedFilteredAggregator.
/**
 * Counts rows matching {@code dimFilter} using a vectorized {@link FilteredAggregatorFactory}
 * wrapped around a count aggregator. Two independent aggregation slots are maintained in the
 * same buffer and driven through the two {@code aggregate} overloads (contiguous and
 * positions-based); the method verifies both slots agree before returning the count.
 *
 * @param dimFilter filter to apply; must be vectorizable
 * @return number of rows matching the filter
 * @throws IllegalStateException if the filter cannot be vectorized
 * @throws ISE if the two aggregation slots disagree
 */
private long selectCountUsingVectorizedFilteredAggregator(final DimFilter dimFilter) {
  Preconditions.checkState(
      makeFilter(dimFilter).canVectorizeMatcher(adapter),
      "Cannot vectorize filter: %s",
      dimFilter
  );
  try (final VectorCursor cursor = makeVectorCursor(null)) {
    final FilteredAggregatorFactory aggregatorFactory = new FilteredAggregatorFactory(
        new CountAggregatorFactory("count"),
        maybeOptimize(dimFilter)
    );
    final VectorAggregator aggregator =
        aggregatorFactory.factorizeVector(cursor.getColumnSelectorFactory());

    // Slot 0 starts at offset 0; slot 1 starts one intermediate-size further in.
    final int secondSlotOffset = aggregatorFactory.getMaxIntermediateSizeWithNulls();
    final ByteBuffer aggregationBuffer = ByteBuffer.allocate(secondSlotOffset * 2);
    aggregator.init(aggregationBuffer, 0);
    aggregator.init(aggregationBuffer, secondSlotOffset);

    while (!cursor.isDone()) {
      final int vectorSize = cursor.getCurrentVectorSize();

      // First form: aggregate the whole vector into slot 0.
      aggregator.aggregate(aggregationBuffer, 0, 0, vectorSize);

      // Second form: positions-based aggregation of every row into slot 1.
      final int[] slotPositions = new int[vectorSize];
      Arrays.fill(slotPositions, secondSlotOffset);
      final int[] rowIndices = new int[vectorSize];
      for (int row = 0; row < rowIndices.length; row++) {
        rowIndices[row] = row;
      }
      aggregator.aggregate(aggregationBuffer, vectorSize, slotPositions, rowIndices, 0);

      cursor.advance();
    }

    final long countFromFirstSlot = (long) aggregator.get(aggregationBuffer, 0);
    final long countFromSecondSlot = (long) aggregator.get(aggregationBuffer, secondSlotOffset);
    if (countFromFirstSlot != countFromSecondSlot) {
      throw new ISE("Oh no, val1[%d] != val2[%d]", countFromFirstSlot, countFromSecondSlot);
    }
    return countFromFirstSlot;
  }
}
Use of org.apache.druid.java.util.common.ISE in the project druid by druid-io.
The class IndexerSQLMetadataStorageCoordinator, method announceHistoricalSegmentBatch.
/**
 * Attempts to insert a batch of segments into the database. Segments that already exist are
 * skipped; although, this existence check is imperfect (SELECT followed by INSERT is racy) and
 * callers must be prepared to retry their entire transaction on exceptions.
 *
 * @param handle       database handle, already inside the caller's transaction
 * @param segments     segments to announce
 * @param usedSegments subset of {@code segments} that should be marked used
 *
 * @return the set of segments actually inserted (i.e. those not already present)
 *
 * @throws IOException if a segment cannot be serialized to its JSON payload
 * @throws ISE         if any INSERT in a batch does not affect exactly one row
 */
private Set<DataSegment> announceHistoricalSegmentBatch(final Handle handle, final Set<DataSegment> segments, final Set<DataSegment> usedSegments) throws IOException {
  final Set<DataSegment> toInsertSegments = new HashSet<>();
  try {
    // Filter out segments whose ids already exist in the DB.
    Set<String> existedSegments = segmentExistsBatch(handle, segments);
    log.info("Found these segments already exist in DB: %s", existedSegments);
    for (DataSegment segment : segments) {
      if (!existedSegments.contains(segment.getId().toString())) {
        toInsertSegments.add(segment);
      }
    }

    // SELECT -> INSERT can fail due to races; callers must be prepared to retry.
    // Avoiding ON DUPLICATE KEY since it's not portable.
    // Avoiding try/catch since it may cause inadvertent transaction-splitting.
    final List<List<DataSegment>> partitionedSegments = Lists.partition(
        new ArrayList<>(toInsertSegments),
        MAX_NUM_SEGMENTS_TO_ANNOUNCE_AT_ONCE
    );

    // "end" must be quoted because it is a reserved word on some databases;
    // %2$s carries the connector-specific quote character.
    PreparedBatch preparedBatch = handle.prepareBatch(
        StringUtils.format(
            "INSERT INTO %1$s (id, dataSource, created_date, start, %2$send%2$s, partitioned, version, used, payload) "
            + "VALUES (:id, :dataSource, :created_date, :start, :end, :partitioned, :version, :used, :payload)",
            dbTables.getSegmentsTable(),
            connector.getQuoteString()
        )
    );

    for (List<DataSegment> partition : partitionedSegments) {
      for (DataSegment segment : partition) {
        preparedBatch.add()
                     .bind("id", segment.getId().toString())
                     .bind("dataSource", segment.getDataSource())
                     .bind("created_date", DateTimes.nowUtc().toString())
                     .bind("start", segment.getInterval().getStart().toString())
                     .bind("end", segment.getInterval().getEnd().toString())
                     // NoneShardSpec means the segment is not partitioned.
                     .bind("partitioned", !(segment.getShardSpec() instanceof NoneShardSpec))
                     .bind("version", segment.getVersion())
                     .bind("used", usedSegments.contains(segment))
                     .bind("payload", jsonMapper.writeValueAsBytes(segment));
      }
      final int[] affectedRows = preparedBatch.execute();
      // Every INSERT must affect exactly one row; anything else indicates a failed publish.
      final boolean succeeded = Arrays.stream(affectedRows).allMatch(eachAffectedRows -> eachAffectedRows == 1);
      if (succeeded) {
        log.infoSegments(partition, "Published segments to DB");
      } else {
        final List<DataSegment> failedToPublish = IntStream.range(0, partition.size())
                                                           .filter(i -> affectedRows[i] != 1)
                                                           .mapToObj(partition::get)
                                                           .collect(Collectors.toList());
        throw new ISE("Failed to publish segments to DB: %s", SegmentUtils.commaSeparatedIdentifiers(failedToPublish));
      }
    }
  } catch (Exception e) {
    log.errorSegments(segments, "Exception inserting segments");
    throw e;
  }
  return toInsertSegments;
}
Use of org.apache.druid.java.util.common.ISE in the project druid by druid-io.
The class CuratorDruidNodeDiscoveryProvider, method stop.
/**
 * Lifecycle stop hook: closes all node-role watchers and node discoverers, then shuts
 * down the listener executor.
 *
 * @throws IOException if closing any registered resource fails
 * @throws ISE if the lifecycle is not in a stoppable state
 */
@LifecycleStop
public void stop() throws IOException {
  if (!lifecycleLock.canStop()) {
    throw new ISE("can't stop.");
  }
  log.debug("Stopping.");

  // Register everything with a single Closer so all resources are closed even if
  // some of them throw; the executor shutdown runs after the closer completes.
  final Closer resourceCloser = Closer.create();
  resourceCloser.registerAll(nodeRoleWatchers.values());
  resourceCloser.registerAll(nodeDiscoverers);
  CloseableUtils.closeAll(resourceCloser, listenerExecutor::shutdownNow);
}
Use of org.apache.druid.java.util.common.ISE in the project druid by druid-io.
The class DruidLeaderClient, method findCurrentLeader.
/**
 * Queries the leader-request endpoint and returns the current leader's URL.
 * On success the discovered URL is cached in {@code currentKnownLeader}.
 *
 * @return the validated leader URL
 * @throws ISE if the request fails, the response is not HTTP 200, or the
 *             returned URL is malformed
 */
public String findCurrentLeader() {
  Preconditions.checkState(lifecycleLock.awaitStarted(1, TimeUnit.MILLISECONDS));

  final StringFullResponseHolder responseHolder;
  try {
    responseHolder = go(makeRequest(HttpMethod.GET, leaderRequestPath));
  } catch (Exception ex) {
    throw new ISE(ex, "Couldn't find leader.");
  }

  if (responseHolder.getStatus().getCode() == 200) {
    final String leaderUrl = responseHolder.getContent();
    try {
      // Parse to confirm the returned content is a well-formed URL before caching it.
      final URL validatedUrl = new URL(leaderUrl);
      currentKnownLeader.set(leaderUrl);
      return validatedUrl.toString();
    } catch (MalformedURLException ex) {
      // Fall through to the ISE below; a 200 with a bad body is still a failure.
      log.error(ex, "Received malformed leader url[%s].", leaderUrl);
    }
  }

  throw new ISE("Couldn't find leader, failed response status is [%s] and content [%s].", responseHolder.getStatus().getCode(), responseHolder.getContent());
}
Use of org.apache.druid.java.util.common.ISE in the project druid by druid-io.
The class StreamAppenderator, method push.
/**
 * Pushes the given segment identifiers: finalizes each corresponding sink, persists all
 * in-memory data, then (asynchronously, on pushExecutor) merges and pushes each sink's
 * segment. Sinks currently being dropped are skipped with a warning.
 *
 * @param identifiers   segments to push; every identifier must have a sink
 * @param committer     committer whose metadata is persisted alongside the data (may be null)
 * @param useUniquePath whether mergeAndPush should write to a unique path
 * @return future resolving to the pushed segments plus the commit metadata
 * @throws ISE (from the synchronous part) if any identifier has no sink
 */
@Override
public ListenableFuture<SegmentsAndCommitMetadata> push(final Collection<SegmentIdWithShardSpec> identifiers, @Nullable final Committer committer, final boolean useUniquePath) {
final Map<SegmentIdWithShardSpec, Sink> theSinks = new HashMap<>();
// Total hydrant count across all pushed sinks, reported in the stats log below.
AtomicLong pushedHydrantsCount = new AtomicLong();
for (final SegmentIdWithShardSpec identifier : identifiers) {
final Sink sink = sinks.get(identifier);
if (sink == null) {
throw new ISE("No sink for identifier: %s", identifier);
}
theSinks.put(identifier, sink);
// finishWriting() returning true means this sink's rows are no longer counted as "open";
// subtract them from the running total.
if (sink.finishWriting()) {
totalRows.addAndGet(-sink.getNumRows());
}
// count hydrants for stats:
pushedHydrantsCount.addAndGet(Iterables.size(sink));
}
// Persist everything first; once the persist future completes, merge and push the
// segments on pushExecutor.
return Futures.transform(// segments.
persistAll(committer), (Function<Object, SegmentsAndCommitMetadata>) commitMetadata -> {
final List<DataSegment> dataSegments = new ArrayList<>();
log.info("Preparing to push (stats): processed rows: [%d], sinks: [%d], fireHydrants (across sinks): [%d]", rowIngestionMeters.getProcessed(), theSinks.size(), pushedHydrantsCount.get());
log.debug("Building and pushing segments: %s", theSinks.keySet().stream().map(SegmentIdWithShardSpec::toString).collect(Collectors.joining(", ")));
for (Map.Entry<SegmentIdWithShardSpec, Sink> entry : theSinks.entrySet()) {
// A sink may have started dropping after we collected it above; skip it rather than
// push a segment that is being removed.
if (droppingSinks.contains(entry.getKey())) {
log.warn("Skipping push of currently-dropping sink[%s]", entry.getKey());
continue;
}
final DataSegment dataSegment = mergeAndPush(entry.getKey(), entry.getValue(), useUniquePath);
// NOTE(review): mergeAndPush returning null appears to mean "nothing to push" — the
// segment is skipped, not treated as an error. Confirm against mergeAndPush's contract.
if (dataSegment != null) {
dataSegments.add(dataSegment);
} else {
log.warn("mergeAndPush[%s] returned null, skipping.", entry.getKey());
}
}
log.info("Push complete...");
return new SegmentsAndCommitMetadata(dataSegments, commitMetadata);
}, pushExecutor);
}
Aggregations