Use of java.util.HashMap in project druid by druid-io.
In class VarianceTimeseriesQueryTest, the method testTimeseriesWithNullFilterOnNonExistentDimension:
@Test
public void testTimeseriesWithNullFilterOnNonExistentDimension() {
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
                                  .dataSource(VarianceTestHelper.dataSource)
                                  .granularity(VarianceTestHelper.dayGran)
                                  .filters("bobby", null)
                                  .intervals(VarianceTestHelper.firstToThird)
                                  .aggregators(VarianceTestHelper.commonPlusVarAggregators)
                                  .postAggregators(
                                      Arrays.<PostAggregator>asList(
                                          VarianceTestHelper.addRowsIndexConstant,
                                          VarianceTestHelper.stddevOfIndexPostAggr))
                                  .descending(descending)
                                  .build();
    List<Result<TimeseriesResultValue>> expectedResults = Arrays.asList(
        new Result<>(
            new DateTime("2011-04-01"),
            new TimeseriesResultValue(
                VarianceTestHelper.of(
                    "rows", 13L,
                    "index", 6626.151596069336,
                    "addRowsIndexConstant", 6640.151596069336,
                    "uniques", VarianceTestHelper.UNIQUES_9,
                    "index_var", descending ? 368885.6897238851 : 368885.689155086,
                    "index_stddev", descending ? 607.3596049490657 : 607.35960448081))),
        new Result<>(
            new DateTime("2011-04-02"),
            new TimeseriesResultValue(
                VarianceTestHelper.of(
                    "rows", 13L,
                    "index", 5833.2095947265625,
                    "addRowsIndexConstant", 5847.2095947265625,
                    "uniques", VarianceTestHelper.UNIQUES_9,
                    "index_var", descending ? 259061.6037088883 : 259061.60216419376,
                    "index_stddev", descending ? 508.9809463122252 : 508.98094479478675))));
    Iterable<Result<TimeseriesResultValue>> results = Sequences.toList(
        runner.run(query, new HashMap<String, Object>()),
        Lists.<Result<TimeseriesResultValue>>newArrayList());
    assertExpectedResults(expectedResults, results);
}
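The HashMap handed to runner.run above serves as a mutable response context: a side channel the query pipeline can populate with metadata next to the returned sequence. A minimal, self-contained sketch of that out-parameter pattern, with illustrative names rather than Druid's API:

public class ResponseContextSketch {
    // The callee returns results and records metadata in the caller-supplied map.
    static java.util.List<String> run(String query, java.util.Map<String, Object> responseContext) {
        responseContext.put("rowsScanned", 13);
        return java.util.Arrays.asList("row-1", "row-2");
    }

    public static void main(String[] args) {
        java.util.Map<String, Object> context = new java.util.HashMap<>();
        java.util.List<String> results = run("select ...", context);
        System.out.println(results + " context=" + context);
    }
}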
Use of java.util.HashMap in project spring-boot-admin by codecentric.
In class HipchatNotifier, the method createHipChatNotification:
protected HttpEntity<Map<String, Object>> createHipChatNotification(ClientApplicationEvent event) {
    Map<String, Object> body = new HashMap<>();
    body.put("color", getColor(event));
    body.put("message", getMessage(event));
    body.put("notify", getNotify());
    body.put("message_format", "html");
    HttpHeaders headers = new HttpHeaders();
    headers.setContentType(MediaType.APPLICATION_JSON);
    return new HttpEntity<>(body, headers);
}
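The returned entity is ready to POST as JSON. A minimal sketch of how it might be sent with Spring's RestTemplate; the endpoint URL, room id, and token below are hypothetical placeholders, not spring-boot-admin configuration:

RestTemplate restTemplate = new RestTemplate();
HttpEntity<Map<String, Object>> notification = createHipChatNotification(event);
// hypothetical HipChat v2 room-notification endpoint
restTemplate.postForEntity(
    "https://hipchat.example.com/v2/room/{roomId}/notification?auth_token={token}",
    notification, Void.class, "42", "secret-token");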
Use of java.util.HashMap in project webmagic by code4craft.
In class SeleniumTest, the method testSelenium:
@Ignore("need chrome driver")
@Test
public void testSelenium() {
    System.getProperties().setProperty("webdriver.chrome.driver", "/Users/yihua/Downloads/chromedriver");
    Map<String, Object> contentSettings = new HashMap<String, Object>();
    // Chrome content-settings value 2 means "block", so images are not loaded
    contentSettings.put("images", 2);
    Map<String, Object> preferences = new HashMap<String, Object>();
    preferences.put("profile.default_content_settings", contentSettings);
    DesiredCapabilities caps = DesiredCapabilities.chrome();
    caps.setCapability("chrome.prefs", preferences);
    caps.setCapability("chrome.switches", Arrays.asList("--user-data-dir=/Users/yihua/temp/chrome"));
    WebDriver webDriver = new ChromeDriver(caps);
    webDriver.get("http://huaban.com/");
    WebElement webElement = webDriver.findElement(By.xpath("/html"));
    System.out.println(webElement.getAttribute("outerHTML"));
    webDriver.close();
}
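DesiredCapabilities.chrome() and the "chrome.prefs" capability are deprecated in later Selenium releases. A sketch of the same image-blocking setup using ChromeOptions instead, assuming Selenium 3.x or later; the driver and profile paths are placeholders:

import java.util.HashMap;
import java.util.Map;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.chrome.ChromeDriver;
import org.openqa.selenium.chrome.ChromeOptions;

public class ChromePrefsSketch {
    public static void main(String[] args) {
        System.setProperty("webdriver.chrome.driver", "/path/to/chromedriver"); // placeholder
        Map<String, Object> contentSettings = new HashMap<>();
        contentSettings.put("images", 2); // 2 = block image loading
        Map<String, Object> prefs = new HashMap<>();
        prefs.put("profile.default_content_settings", contentSettings);
        ChromeOptions options = new ChromeOptions();
        options.setExperimentalOption("prefs", prefs);
        options.addArguments("--user-data-dir=/tmp/chrome-profile"); // placeholder
        WebDriver driver = new ChromeDriver(options);
        try {
            driver.get("http://huaban.com/");
        } finally {
            driver.quit(); // quit() also shuts down the chromedriver process
        }
    }
}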
Use of java.util.HashMap in project elasticsearch by elastic.
In class XContentSettingsLoader, the method load:
public Map<String, String> load(XContentParser jp) throws IOException {
    StringBuilder sb = new StringBuilder();
    Map<String, String> settings = new HashMap<>();
    List<String> path = new ArrayList<>();
    XContentParser.Token token = jp.nextToken();
    if (token == null) {
        return settings;
    }
    if (token != XContentParser.Token.START_OBJECT) {
        throw new ElasticsearchParseException("malformed, expected settings to start with 'object', instead was [{}]", token);
    }
    serializeObject(settings, sb, path, jp, null);
    // ensure we reached the end of the stream
    XContentParser.Token lastToken = null;
    try {
        while (!jp.isClosed() && (lastToken = jp.nextToken()) == null) ;
    } catch (Exception e) {
        throw new ElasticsearchParseException("malformed, expected end of settings but encountered additional content starting at line number: [{}], column number: [{}]", e, jp.getTokenLocation().lineNumber, jp.getTokenLocation().columnNumber);
    }
    if (lastToken != null) {
        throw new ElasticsearchParseException("malformed, expected end of settings but encountered additional content starting at line number: [{}], column number: [{}]", jp.getTokenLocation().lineNumber, jp.getTokenLocation().columnNumber);
    }
    return settings;
}
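serializeObject is not shown here; conceptually it walks the parsed object and collapses nested keys into dot-joined settings paths. A minimal sketch of that flattening over plain Java maps (illustrative names, not the Elasticsearch helper):

import java.util.HashMap;
import java.util.Map;

public class FlattenSketch {
    // Recursively collapse nested maps into dot-joined setting keys.
    static void flatten(String prefix, Map<String, Object> node, Map<String, String> out) {
        for (Map.Entry<String, Object> entry : node.entrySet()) {
            String key = prefix.isEmpty() ? entry.getKey() : prefix + "." + entry.getKey();
            Object value = entry.getValue();
            if (value instanceof Map) {
                @SuppressWarnings("unchecked")
                Map<String, Object> child = (Map<String, Object>) value;
                flatten(key, child, out);
            } else {
                out.put(key, String.valueOf(value));
            }
        }
    }

    public static void main(String[] args) {
        Map<String, Object> index = new HashMap<>();
        index.put("number_of_shards", "3");
        Map<String, Object> root = new HashMap<>();
        root.put("index", index);
        Map<String, String> settings = new HashMap<>();
        flatten("", root, settings);
        System.out.println(settings); // prints {index.number_of_shards=3}
    }
}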
Use of java.util.HashMap in project elasticsearch by elastic.
In class Engine, the method getSegmentInfo:
protected Segment[] getSegmentInfo(SegmentInfos lastCommittedSegmentInfos, boolean verbose) {
    ensureOpen();
    Map<String, Segment> segments = new HashMap<>();
    // first, go over and compute the search ones...
    Searcher searcher = acquireSearcher("segments");
    try {
        for (LeafReaderContext reader : searcher.reader().leaves()) {
            SegmentCommitInfo info = segmentReader(reader.reader()).getSegmentInfo();
            assert !segments.containsKey(info.info.name);
            Segment segment = new Segment(info.info.name);
            segment.search = true;
            segment.docCount = reader.reader().numDocs();
            segment.delDocCount = reader.reader().numDeletedDocs();
            segment.version = info.info.getVersion();
            segment.compound = info.info.getUseCompoundFile();
            try {
                segment.sizeInBytes = info.sizeInBytes();
            } catch (IOException e) {
                logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
            }
            final SegmentReader segmentReader = segmentReader(reader.reader());
            segment.memoryInBytes = segmentReader.ramBytesUsed();
            if (verbose) {
                segment.ramTree = Accountables.namedAccountable("root", segmentReader);
            }
            // TODO: add more fine grained mem stats values to per segment info here
            segments.put(info.info.name, segment);
        }
    } finally {
        searcher.close();
    }
    // now, correlate or add the committed ones...
    if (lastCommittedSegmentInfos != null) {
        SegmentInfos infos = lastCommittedSegmentInfos;
        for (SegmentCommitInfo info : infos) {
            Segment segment = segments.get(info.info.name);
            if (segment == null) {
                segment = new Segment(info.info.name);
                segment.search = false;
                segment.committed = true;
                segment.docCount = info.info.maxDoc();
                segment.delDocCount = info.getDelCount();
                segment.version = info.info.getVersion();
                segment.compound = info.info.getUseCompoundFile();
                try {
                    segment.sizeInBytes = info.sizeInBytes();
                } catch (IOException e) {
                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
                }
                segments.put(info.info.name, segment);
            } else {
                segment.committed = true;
            }
        }
    }
    Segment[] segmentsArr = segments.values().toArray(new Segment[segments.values().size()]);
    // sort by generation; Long.compare avoids the overflow that casting the
    // difference of two longs to int could cause
    Arrays.sort(segmentsArr, new Comparator<Segment>() {
        @Override
        public int compare(Segment o1, Segment o2) {
            return Long.compare(o1.getGeneration(), o2.getGeneration());
        }
    });
    return segmentsArr;
}
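The second loop above is a correlate-or-add pass: committed segments either update an entry the searcher already produced or insert a fresh one. A minimal, self-contained sketch of the same HashMap idiom with a stand-in record type instead of the Lucene-backed Segment:

import java.util.HashMap;
import java.util.Map;

public class CorrelateOrAddSketch {
    // Stand-in for the Segment record above (illustrative, not Lucene's).
    static class Info {
        final String name;
        boolean search;
        boolean committed;
        Info(String name) { this.name = name; }
    }

    public static void main(String[] args) {
        Map<String, Info> byName = new HashMap<>();
        // first pass: segments visible to the searcher
        for (String name : new String[] { "_0", "_1" }) {
            Info info = new Info(name);
            info.search = true;
            byName.put(name, info);
        }
        // second pass: correlate committed segments with existing entries,
        // or add ones the searcher has not seen yet
        for (String name : new String[] { "_1", "_2" }) {
            Info info = byName.computeIfAbsent(name, Info::new);
            info.committed = true;
        }
        byName.values().forEach(info -> System.out.println(
            info.name + " search=" + info.search + " committed=" + info.committed));
    }
}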