Use of com.google.common.collect.MapMaker in project druid by druid-io: class RetryQueryRunnerTest, method testNoDuplicateRetry.
@Test
public void testNoDuplicateRetry() throws Exception {
  Map<String, Object> context = new MapMaker().makeMap();
  context.put("count", 0);
  context.put(Result.MISSING_SEGMENTS_KEY, Lists.newArrayList());
  RetryQueryRunner<Result<TimeseriesResultValue>> runner = new RetryQueryRunner<>(
      new QueryRunner<Result<TimeseriesResultValue>>() {
        @Override
        public Sequence<Result<TimeseriesResultValue>> run(Query<Result<TimeseriesResultValue>> query, Map<String, Object> context) {
          if ((int) context.get("count") == 0) {
            // first run: report 2 missing segments
            ((List) context.get(Result.MISSING_SEGMENTS_KEY)).add(new SegmentDescriptor(new Interval(178888, 1999999), "test", 1));
            ((List) context.get(Result.MISSING_SEGMENTS_KEY)).add(new SegmentDescriptor(new Interval(178888, 1999999), "test", 2));
            context.put("count", 1);
            return Sequences.simple(Arrays.asList(new Result<>(new DateTime(), new TimeseriesResultValue(Maps.<String, Object>newHashMap()))));
          } else if ((int) context.get("count") == 1) {
            // first retry: the query should carry both missing segments
            Assert.assertTrue("Should retry with 2 missing segments", ((MultipleSpecificSegmentSpec) ((BaseQuery) query).getQuerySegmentSpec()).getDescriptors().size() == 2);
            // report 1 segment still missing
            ((List) context.get(Result.MISSING_SEGMENTS_KEY)).add(new SegmentDescriptor(new Interval(178888, 1999999), "test", 2));
            context.put("count", 2);
            return Sequences.simple(Arrays.asList(new Result<>(new DateTime(), new TimeseriesResultValue(Maps.<String, Object>newHashMap()))));
          } else {
            // second retry: only the one remaining missing segment should be retried
            Assert.assertTrue("Should retry with 1 missing segment", ((MultipleSpecificSegmentSpec) ((BaseQuery) query).getQuerySegmentSpec()).getDescriptors().size() == 1);
            // no more missing segments
            context.put("count", 3);
            return Sequences.simple(Arrays.asList(new Result<>(new DateTime(), new TimeseriesResultValue(Maps.<String, Object>newHashMap()))));
          }
        }
      },
      (QueryToolChest) new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator()),
      new RetryQueryRunnerConfig() {
        private int numTries = 2;
        private boolean returnPartialResults = false;

        public int getNumTries() {
          return numTries;
        }

        public boolean returnPartialResults() {
          return returnPartialResults;
        }
      },
      jsonMapper);
  Iterable<Result<TimeseriesResultValue>> actualResults = Sequences.toList(runner.run(query, context), Lists.<Result<TimeseriesResultValue>>newArrayList());
  Assert.assertTrue("Should return a list with 3 elements", ((List) actualResults).size() == 3);
  Assert.assertTrue("Should have nothing in missingSegment list", ((List) context.get(Result.MISSING_SEGMENTS_KEY)).size() == 0);
}
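A note on the pattern above: with no weakKeys()/weakValues() modifiers, MapMaker.makeMap() simply returns a thread-safe ConcurrentMap, which is what lets the runner mutate the shared response context while the test reads it back. A minimal standalone sketch of that behavior (the class name and keys are illustrative, not from the Druid test):

import com.google.common.collect.MapMaker;
import java.util.concurrent.ConcurrentMap;

public class ResponseContextSketch {
  public static void main(String[] args) {
    // Without weak or soft reference options, makeMap() returns an
    // ordinary ConcurrentMap, safe to mutate from other threads.
    ConcurrentMap<String, Object> context = new MapMaker().makeMap();
    context.put("count", 0);
    context.putIfAbsent("count", 99); // no-op: the key is already present
    System.out.println(context.get("count")); // prints 0
  }
}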
Use of com.google.common.collect.MapMaker in project druid by druid-io: class RetryQueryRunnerTest, method testRetryMultiple.
@Test
public void testRetryMultiple() throws Exception {
  Map<String, Object> context = new MapMaker().makeMap();
  context.put("count", 0);
  context.put(Result.MISSING_SEGMENTS_KEY, Lists.newArrayList());
  RetryQueryRunner<Result<TimeseriesResultValue>> runner = new RetryQueryRunner<>(
      new QueryRunner<Result<TimeseriesResultValue>>() {
        @Override
        public Sequence<Result<TimeseriesResultValue>> run(Query<Result<TimeseriesResultValue>> query, Map<String, Object> context) {
          if ((int) context.get("count") < 3) {
            // first three runs: report a missing segment and return nothing
            ((List) context.get(Result.MISSING_SEGMENTS_KEY)).add(new SegmentDescriptor(new Interval(178888, 1999999), "test", 1));
            context.put("count", (int) context.get("count") + 1);
            return Sequences.empty();
          } else {
            // fourth run: succeed with a single result
            return Sequences.simple(Arrays.asList(new Result<>(new DateTime(), new TimeseriesResultValue(Maps.<String, Object>newHashMap()))));
          }
        }
      },
      (QueryToolChest) new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator()),
      new RetryQueryRunnerConfig() {
        private int numTries = 4;
        private boolean returnPartialResults = true;

        public int getNumTries() {
          return numTries;
        }

        public boolean returnPartialResults() {
          return returnPartialResults;
        }
      },
      jsonMapper);
  Iterable<Result<TimeseriesResultValue>> actualResults = Sequences.toList(runner.run(query, context), Lists.<Result<TimeseriesResultValue>>newArrayList());
  Assert.assertTrue("Should return a list with one element", ((List) actualResults).size() == 1);
  Assert.assertTrue("Should have nothing in missingSegment list", ((List) context.get(Result.MISSING_SEGMENTS_KEY)).size() == 0);
}
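The config above allows up to four tries with partial results, and the stub runner reports a missing segment on its first three runs, so exactly one result survives. Below is a simplified sketch of the retry loop such a runner performs; it is not Druid's actual RetryQueryRunner implementation, and the key literal stands in for Result.MISSING_SEGMENTS_KEY:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

final class RetrySketch {
  // Assumed literal, standing in for Result.MISSING_SEGMENTS_KEY.
  static final String MISSING_SEGMENTS_KEY = "missingSegments";

  // Runs the attempt once, then retries up to numTries times while the
  // shared context still reports missing segments.
  static <T> List<T> runWithRetries(Map<String, Object> context, int numTries,
      Function<Map<String, Object>, List<T>> attempt) {
    List<T> results = new ArrayList<>(attempt.apply(context));
    for (int i = 0; i < numTries; i++) {
      List<?> missing = (List<?>) context.get(MISSING_SEGMENTS_KEY);
      if (missing == null || missing.isEmpty()) {
        break; // nothing left to retry
      }
      context.put(MISSING_SEGMENTS_KEY, new ArrayList<>()); // reset before retrying
      results.addAll(attempt.apply(context));
    }
    return results;
  }
}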
Use of com.google.common.collect.MapMaker in project druid by druid-io: class DataSourceMetadataQueryTest, method testMaxIngestedEventTime.
@Test
public void testMaxIngestedEventTime() throws Exception {
  final IncrementalIndex rtIndex = new OnheapIncrementalIndex(0L, Granularities.NONE, new AggregatorFactory[] { new CountAggregatorFactory("count") }, 1000);
  final QueryRunner runner = QueryRunnerTestHelper.makeQueryRunner((QueryRunnerFactory) new DataSourceMetadataQueryRunnerFactory(QueryRunnerTestHelper.NOOP_QUERYWATCHER), new IncrementalIndexSegment(rtIndex, "test"), null);
  DateTime timestamp = new DateTime(System.currentTimeMillis());
  rtIndex.add(new MapBasedInputRow(timestamp.getMillis(), ImmutableList.of("dim1"), ImmutableMap.<String, Object>of("dim1", "x")));
  DataSourceMetadataQuery dataSourceMetadataQuery = Druids.newDataSourceMetadataQueryBuilder().dataSource("testing").build();
  Map<String, Object> context = new MapMaker().makeMap();
  context.put(Result.MISSING_SEGMENTS_KEY, Lists.newArrayList());
  Iterable<Result<DataSourceMetadataResultValue>> results = Sequences.toList(runner.run(dataSourceMetadataQuery, context), Lists.<Result<DataSourceMetadataResultValue>>newArrayList());
  DataSourceMetadataResultValue val = results.iterator().next().getValue();
  DateTime maxIngestedEventTime = val.getMaxIngestedEventTime();
  Assert.assertEquals(timestamp, maxIngestedEventTime);
}
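Every test in this listing seeds the MapMaker-built context the same way before running a query. Here is a small helper capturing that repeated setup; the helper class and the literal key value are assumptions for illustration (the tests reference the Result.MISSING_SEGMENTS_KEY constant instead):

import com.google.common.collect.MapMaker;
import java.util.ArrayList;
import java.util.Map;

final class ResponseContexts {
  // Assumed literal; the Druid tests use the Result.MISSING_SEGMENTS_KEY constant.
  static final String MISSING_SEGMENTS_KEY = "missingSegments";

  // Builds a concurrent response context pre-seeded with an empty
  // missing-segments list, mirroring the setup repeated in the tests above.
  static Map<String, Object> newContext() {
    Map<String, Object> context = new MapMaker().makeMap();
    context.put(MISSING_SEGMENTS_KEY, new ArrayList<>());
    return context;
  }
}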
Use of com.google.common.collect.MapMaker in project druid by druid-io: class TimeBoundaryQueryRunnerTest, method testTimeBoundaryMin.
@Test
@SuppressWarnings("unchecked")
public void testTimeBoundaryMin() {
  TimeBoundaryQuery timeBoundaryQuery = Druids.newTimeBoundaryQueryBuilder().dataSource("testing").bound(TimeBoundaryQuery.MIN_TIME).build();
  Map<String, Object> context = new MapMaker().makeMap();
  context.put(Result.MISSING_SEGMENTS_KEY, Lists.newArrayList());
  Iterable<Result<TimeBoundaryResultValue>> results = Sequences.toList(runner.run(timeBoundaryQuery, context), Lists.<Result<TimeBoundaryResultValue>>newArrayList());
  TimeBoundaryResultValue val = results.iterator().next().getValue();
  DateTime minTime = val.getMinTime();
  DateTime maxTime = val.getMaxTime();
  Assert.assertEquals(new DateTime("2011-01-12T00:00:00.000Z"), minTime);
  Assert.assertNull(maxTime);
}
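Note that the context is always seeded with an empty list rather than left absent or set to null. That matters because the concurrent maps MapMaker builds reject null values outright, as this minimal sketch shows (the key literal is illustrative):

import com.google.common.collect.MapMaker;
import java.util.concurrent.ConcurrentMap;

public class NullValueSketch {
  public static void main(String[] args) {
    ConcurrentMap<String, Object> context = new MapMaker().makeMap();
    try {
      context.put("missingSegments", null); // concurrent maps reject null values
    } catch (NullPointerException expected) {
      System.out.println("null rejected; seed the context with an empty list instead");
    }
  }
}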
Use of com.google.common.collect.MapMaker in project hbase by apache: class AccessController, method start.
/* ---- MasterObserver implementation ---- */
@Override
public void start(CoprocessorEnvironment env) throws IOException {
  CompoundConfiguration conf = new CompoundConfiguration();
  conf.add(env.getConfiguration());
  authorizationEnabled = isAuthorizationSupported(conf);
  if (!authorizationEnabled) {
    LOG.warn("The AccessController has been loaded with authorization checks disabled.");
  }
  shouldCheckExecPermission = conf.getBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, AccessControlConstants.DEFAULT_EXEC_PERMISSION_CHECKS);
  cellFeaturesEnabled = (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS);
  if (!cellFeaturesEnabled) {
    LOG.info("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS + " is required to persist cell ACLs. Consider setting " + HFile.FORMAT_VERSION_KEY + " accordingly.");
  }
  ZooKeeperWatcher zk = null;
  if (env instanceof MasterCoprocessorEnvironment) {
    // if running on HMaster
    MasterCoprocessorEnvironment mEnv = (MasterCoprocessorEnvironment) env;
    zk = mEnv.getMasterServices().getZooKeeper();
  } else if (env instanceof RegionServerCoprocessorEnvironment) {
    // if running on a region server
    RegionServerCoprocessorEnvironment rsEnv = (RegionServerCoprocessorEnvironment) env;
    zk = rsEnv.getRegionServerServices().getZooKeeper();
  } else if (env instanceof RegionCoprocessorEnvironment) {
    // if running at region
    regionEnv = (RegionCoprocessorEnvironment) env;
    conf.addStringMap(regionEnv.getRegion().getTableDesc().getConfiguration());
    zk = regionEnv.getRegionServerServices().getZooKeeper();
    compatibleEarlyTermination = conf.getBoolean(AccessControlConstants.CF_ATTRIBUTE_EARLY_OUT, AccessControlConstants.DEFAULT_ATTRIBUTE_EARLY_OUT);
  }
  // set the user provider
  this.userProvider = UserProvider.instantiate(env.getConfiguration());
  // on failure, throw RuntimeException so that the coprocessor is unloaded
  if (zk != null) {
    try {
      this.authManager = TableAuthManager.getOrCreate(zk, env.getConfiguration());
    } catch (IOException ioe) {
      throw new RuntimeException("Error obtaining TableAuthManager", ioe);
    }
  } else {
    throw new RuntimeException("Error obtaining TableAuthManager, zk found null.");
  }
  tableAcls = new MapMaker().weakValues().makeMap();
}
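Unlike the plain makeMap() calls in the Druid tests, tableAcls uses weakValues(): an entry can be reclaimed by the garbage collector once nothing else strongly references its value, so the ACL cache cannot pin table permissions in memory. A minimal sketch of those semantics; System.gc() is only a hint, so the final lookup is likely, but not guaranteed, to return null:

import com.google.common.collect.MapMaker;
import java.util.concurrent.ConcurrentMap;

public class WeakValuesSketch {
  public static void main(String[] args) throws InterruptedException {
    ConcurrentMap<String, Object> cache = new MapMaker().weakValues().makeMap();
    Object value = new Object();
    cache.put("acl", value);
    System.out.println(cache.get("acl")); // present while a strong reference exists
    value = null;    // drop the only strong reference
    System.gc();     // hint; weakly referenced values are cleared on collection
    Thread.sleep(100);
    System.out.println(cache.get("acl")); // likely null once the value is collected
  }
}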