Example use of org.apache.druid.java.util.common.ISE from the druid-io/druid project: class BatchDataSegmentAnnouncer, method announceSegments.
// Announces the given segments to the cluster. Segments are serialized and packed
// into shared ZK nodes (SegmentZNodes): when adding a segment would exceed either
// the configured segments-per-node count or the max-bytes-per-node limit, the
// current node is announced and a fresh node is started. A change request is
// recorded for every segment regardless of whether ZK announcement is enabled.
@Override
public void announceSegments(Iterable<DataSegment> segments) throws IOException {
SegmentZNode segmentZNode = new SegmentZNode(makeServedSegmentPath());
// Segments accumulated for the current (not-yet-announced) znode.
Set<DataSegment> batch = new HashSet<>();
List<DataSegmentChangeRequest> changesBatch = new ArrayList<>();
// Running totals for the current znode, reset each time a node is flushed.
int byteSize = 0;
int count = 0;
synchronized (lock) {
for (DataSegment ds : segments) {
if (segmentLookup.containsKey(ds)) {
// NOTE(review): this aborts the entire call, not just this segment. Any
// segments already processed in this loop are in segmentLookup/batch but
// may never be announced, and changesBatch is discarded — confirm this
// all-or-nothing behavior is intended for duplicate announcements.
log.info("Skipping announcement of segment [%s]. Announcement exists already.", ds.getId());
return;
}
DataSegment segment = segmentTransformer.apply(ds);
changesBatch.add(new SegmentChangeRequestLoad(segment));
if (isSkipSegmentAnnouncementOnZk) {
// ZK announcement disabled: record only the lookup entry (dummy znode) and
// the change request; skip all znode byte accounting below.
segmentLookup.put(segment, dummyZnode);
continue;
}
int newBytesLen = jsonMapper.writeValueAsBytes(segment).length;
// A single segment larger than a whole node can never fit; fail fast.
if (newBytesLen > config.getMaxBytesPerNode()) {
throw new ISE("byte size %,d exceeds %,d", newBytesLen, config.getMaxBytesPerNode());
}
// Current znode is full (by count or bytes): announce it and start a new one.
if (count >= config.getSegmentsPerNode() || byteSize + newBytesLen > config.getMaxBytesPerNode()) {
segmentZNode.addSegments(batch);
announcer.announce(segmentZNode.getPath(), segmentZNode.getBytes());
segmentZNode = new SegmentZNode(makeServedSegmentPath());
batch = new HashSet<>();
count = 0;
byteSize = 0;
}
log.info("Announcing segment[%s] at path[%s]", segment.getId(), segmentZNode.getPath());
segmentLookup.put(segment, segmentZNode);
batch.add(segment);
count++;
byteSize += newBytesLen;
}
}
// Change requests are published outside the lock.
changes.addChangeRequests(changesBatch);
if (!isSkipSegmentAnnouncementOnZk) {
// Flush the final, partially-filled znode (if ZK announcement is enabled).
segmentZNode.addSegments(batch);
announcer.announce(segmentZNode.getPath(), segmentZNode.getBytes());
}
}
Example use of org.apache.druid.java.util.common.ISE from the druid-io/druid project: class LimitRequestsFilterTest, method createAndStartRequestThread.
/**
 * Runs the filter against the given request/response on a background thread.
 *
 * @return a latch (initial count 1) that the TestFilterChain counts down when the
 *         request makes it through the filter to the chain
 */
private CountDownLatch createAndStartRequestThread(LimitRequestsFilter filter, ServletRequest req, HttpServletResponse resp) {
  final CountDownLatch latch = new CountDownLatch(1);
  final Runnable request = () -> {
    try {
      filter.doFilter(req, resp, new TestFilterChain(latch));
    } catch (Exception e) {
      throw new ISE(e, "exception");
    }
  };
  new Thread(request).start();
  return latch;
}
Example use of org.apache.druid.java.util.common.ISE from the druid-io/druid project: class DumpSegment, method runBitmaps.
/**
 * Dumps the bitmap indexes of the selected columns as a single JSON object:
 * {"bitmapSerdeFactory": ..., "bitmaps": {column: {value: bitmap, ...}, ...}}.
 * Bitmaps are emitted either as arrays of row numbers (when decompressBitmaps is
 * set) or as base64-encoded serialized bytes.
 *
 * @param injector used to obtain the JSON ObjectMapper
 * @param index    the segment index whose bitmaps are dumped
 * @throws IOException if writing the output fails
 */
private void runBitmaps(final Injector injector, final QueryableIndex index) throws IOException {
  final ObjectMapper objectMapper = injector.getInstance(Key.get(ObjectMapper.class, Json.class));
  final BitmapFactory bitmapFactory = index.getBitmapFactoryForDimensions();
  // Pick the serde matching the factory the segment was written with; anything
  // else is unsupported.
  final BitmapSerdeFactory bitmapSerdeFactory;
  if (bitmapFactory instanceof ConciseBitmapFactory) {
    bitmapSerdeFactory = new ConciseBitmapSerdeFactory();
  } else if (bitmapFactory instanceof RoaringBitmapFactory) {
    bitmapSerdeFactory = new RoaringBitmapSerdeFactory(null);
  } else {
    throw new ISE("Don't know which BitmapSerdeFactory to use for BitmapFactory[%s]!", bitmapFactory.getClass().getName());
  }
  final List<String> columnNames = getColumnsToInclude(index);
  // Lambda instead of an anonymous Function, consistent with the rest of the file.
  withOutputStream(out -> {
    try (final JsonGenerator jg = objectMapper.getFactory().createGenerator(out)) {
      jg.writeStartObject();
      jg.writeObjectField("bitmapSerdeFactory", bitmapSerdeFactory);
      jg.writeFieldName("bitmaps");
      jg.writeStartObject();
      for (final String columnName : columnNames) {
        final ColumnHolder columnHolder = index.getColumnHolder(columnName);
        final BitmapIndex bitmapIndex = columnHolder.getBitmapIndex();
        if (bitmapIndex == null) {
          // Column has no bitmap index (e.g. non-dimension column).
          jg.writeNullField(columnName);
        } else {
          jg.writeFieldName(columnName);
          jg.writeStartObject();
          // Hoisted: cardinality is loop-invariant.
          final int cardinality = bitmapIndex.getCardinality();
          for (int i = 0; i < cardinality; i++) {
            String val = bitmapIndex.getValue(i);
            // respect nulls if they are present in the dictionary
            jg.writeFieldName(val == null ? "null" : val);
            final ImmutableBitmap bitmap = bitmapIndex.getBitmap(i);
            if (decompressBitmaps) {
              // Expanded form: one JSON array of row numbers per value.
              jg.writeStartArray();
              final IntIterator iterator = bitmap.iterator();
              while (iterator.hasNext()) {
                final int rowNum = iterator.next();
                jg.writeNumber(rowNum);
              }
              jg.writeEndArray();
            } else {
              // Compact form: serialized bitmap bytes (base64 via writeBinary).
              byte[] bytes = bitmapSerdeFactory.getObjectStrategy().toBytes(bitmap);
              if (bytes != null) {
                jg.writeBinary(bytes);
              }
            }
          }
          jg.writeEndObject();
        }
      }
      jg.writeEndObject();
      jg.writeEndObject();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
    return null;
  });
}
Example use of org.apache.druid.java.util.common.ISE from the druid-io/druid project: class DumpSegment, method run.
/**
 * Entry point: parses the requested dump type, loads the segment from the
 * configured directory, and dispatches to the matching dump routine.
 */
@Override
public void run() {
  final Injector injector = makeInjector();
  final IndexIO indexIO = injector.getInstance(IndexIO.class);
  final DumpType dumpType;
  try {
    dumpType = DumpType.valueOf(StringUtils.toUpperCase(dumpTypeString));
  } catch (Exception e) {
    // Preserve the cause (was dropped before), matching the ISE(e, ...) style
    // used elsewhere in this file.
    throw new IAE(e, "Not a valid dump type: %s", dumpTypeString);
  }
  try (final QueryableIndex index = indexIO.loadIndex(new File(directory))) {
    switch (dumpType) {
      case ROWS:
        runDump(injector, index);
        break;
      case METADATA:
        runMetadata(injector, index);
        break;
      case BITMAPS:
        runBitmaps(injector, index);
        break;
      default:
        // Unreachable unless a new DumpType is added without a handler.
        throw new ISE("dumpType[%s] has no handler", dumpType);
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Example use of org.apache.druid.java.util.common.ISE from the druid-io/druid project: class DruidMeta, method openDruidConnection.
// Opens a new connection with the given id, enforcing the broker-wide connection
// limit. Uses an optimistic increment-then-check pattern on connectionCount: every
// failure path below must compensate with a decrementAndGet.
private DruidConnection openDruidConnection(final String connectionId, final Map<String, Object> context) {
if (connectionCount.incrementAndGet() > config.getMaxConnections()) {
// O(connections) but we don't expect this to happen often (it's a last-ditch effort to clear out
// abandoned connections) or to have too many connections.
final Iterator<Map.Entry<String, DruidConnection>> entryIterator = connections.entrySet().iterator();
while (entryIterator.hasNext()) {
final Map.Entry<String, DruidConnection> entry = entryIterator.next();
if (entry.getValue().closeIfEmpty()) {
entryIterator.remove();
// Removed a connection, decrement the counter.
connectionCount.decrementAndGet();
break;
}
}
// Re-check after attempting to evict one empty connection.
if (connectionCount.get() > config.getMaxConnections()) {
// We aren't going to make a connection after all.
connectionCount.decrementAndGet();
throw logFailure(new ISE("Too many connections"), "Too many connections, limit is[%,d] per broker", config.getMaxConnections());
}
}
final DruidConnection putResult = connections.putIfAbsent(connectionId, new DruidConnection(connectionId, config.getMaxStatementsPerConnection(), context));
if (putResult != null) {
// Didn't actually insert the connection.
connectionCount.decrementAndGet();
throw logFailure(new ISE("Connection[%s] already open.", connectionId));
}
LOG.debug("Connection[%s] opened.", connectionId);
// Call getDruidConnection to start the timeout timer.
return getDruidConnection(connectionId);
}
Aggregations