Use of mondrian.rolap.cache.SegmentCacheIndex in project mondrian by pentaho.
The class SegmentLoader, method createExecuteSql.
/**
* Creates and executes a SQL statement to retrieve the set of cells
* specified by a GroupingSetsList.
*
* <p>This method may be overridden in tests.
*
* @param cellRequestCount Number of missed cells that led to this request
* @param groupingSetsList Grouping sets list
* @param compoundPredicateList Compound predicate list
* @return An executed SQL statement, or null
*/
SqlStatement createExecuteSql(int cellRequestCount, final GroupingSetsList groupingSetsList, List<StarPredicate> compoundPredicateList) {
RolapStar star = groupingSetsList.getStar();
Pair<String, List<SqlStatement.Type>> pair = AggregationManager.generateSql(groupingSetsList, compoundPredicateList);
final Locus locus = new SqlStatement.StatementLocus(Locus.peek().execution, "Segment.load", "Error while loading segment", SqlStatementEvent.Purpose.CELL_SEGMENT, cellRequestCount);
// When caching is enabled, we must register the SQL statement
// in the index. We don't want to cancel SQL statements that are shared
// across threads unless it is safe.
final Util.Functor1<Void, Statement> callbackWithCaching = new Util.Functor1<Void, Statement>() {
public Void apply(final Statement stmt) {
cacheMgr.execute(new SegmentCacheManager.Command<Void>() {
public Void call() throws Exception {
boolean atLeastOneActive = false;
for (Segment seg : groupingSetsList.getDefaultSegments()) {
final SegmentCacheIndex index = cacheMgr.getIndexRegistry().getIndex(seg.star);
// If the index no longer contains the header, the load was
// canceled; skip the segment, and the statement will be cleaned up
// then.
if (index.contains(seg.getHeader())) {
index.linkSqlStatement(seg.getHeader(), stmt);
atLeastOneActive = true;
}
}
if (!atLeastOneActive) {
// None of the segments are in the index anymore; throw this
// so that the loading thread knows to stop.
throw new AbortException();
}
return null;
}
public Locus getLocus() {
return locus;
}
});
return null;
}
};
// When using no cache, we register the SQL statement directly
// with the execution instance for cleanup.
final Util.Functor1<Void, Statement> callbackNoCaching = new Util.Functor1<Void, Statement>() {
public Void apply(final Statement stmt) {
locus.execution.registerStatement(locus, stmt);
return null;
}
};
try {
return RolapUtil.executeQuery(star.getDataSource(), pair.left, pair.right, 0, 0, locus, -1, -1, // choose the callback according to whether we cache the segments or not:
MondrianProperties.instance().DisableCaching.get() ? callbackNoCaching : callbackWithCaching);
} catch (Throwable t) {
if (Util.getMatchingCause(t, AbortException.class) != null) {
return null;
} else {
throw new MondrianException("Failed to load segment from SQL", t);
}
}
}
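The Javadoc above notes that createExecuteSql may be overridden in tests. A minimal sketch of such an override, assuming SegmentLoader's constructor takes the SegmentCacheManager (as the cacheMgr field above suggests); the RecordingSegmentLoader class and its invocationCount field are hypothetical, not part of Mondrian:
// Hypothetical test double. It must live in the same package as
// SegmentLoader, because createExecuteSql is package-private.
class RecordingSegmentLoader extends SegmentLoader {
    int invocationCount = 0;
    RecordingSegmentLoader(SegmentCacheManager cacheMgr) {
        super(cacheMgr);
    }
    @Override
    SqlStatement createExecuteSql(int cellRequestCount, GroupingSetsList groupingSetsList, List<StarPredicate> compoundPredicateList) {
        // Record the call, then delegate to the real implementation.
        ++invocationCount;
        return super.createExecuteSql(cellRequestCount, groupingSetsList, compoundPredicateList);
    }
}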
Use of mondrian.rolap.cache.SegmentCacheIndex in project mondrian by pentaho.
The class SegmentLoader, method load.
/**
* Loads data for all the segments of the GroupingSets. If the grouping
* sets list contains more than one grouping set, data is loaded using
* the GROUP BY GROUPING SETS SQL syntax; if only one grouping set is
* passed in the list, data is loaded with a plain GROUP BY. If the
* database does not support grouping sets (see
* {@link mondrian.spi.Dialect#supportsGroupingSets()}), the grouping
* sets list should always have exactly one element.
*
* <p>For example, if the list has 2 grouping sets with columns A, B, C
* and B, C respectively, then the SQL will be
* "GROUP BY GROUPING SETS ((A, B, C), (B, C))".
*
* <p>If the list has only one grouping set, the SQL has no GROUPING
* SETS clause.
*
* <p>The <code>groupingSets</code> list should be in topological order,
* with the more detailed grouping sets occurring first. In other words,
* the first element of the list should always be the detailed grouping
* set (the default grouping set), followed by grouping sets which can
* be rolled up from it. In the example, (A, B, C) is the detailed
* grouping set and (B, C) is rolled up using it.
*
* <p>Grouping sets are removed from the {@code groupingSets} list as they
* are loaded.</p>
*
* @param cellRequestCount Number of missed cells that led to this request
* @param groupingSets List of grouping sets whose segments are loaded
* @param compoundPredicateList Compound predicates
* @param segmentFutures List of futures wherein each statement will place
* a list of the segments it has loaded, when it
* completes
*/
public void load(int cellRequestCount, List<GroupingSet> groupingSets, List<StarPredicate> compoundPredicateList, List<Future<Map<Segment, SegmentWithData>>> segmentFutures) {
if (!MondrianProperties.instance().DisableCaching.get()) {
for (GroupingSet groupingSet : groupingSets) {
for (Segment segment : groupingSet.getSegments()) {
final SegmentCacheIndex index = cacheMgr.getIndexRegistry().getIndex(segment.star);
index.add(segment.getHeader(), new SegmentBuilder.StarSegmentConverter(segment.measure, compoundPredicateList), true);
// Make sure that we are registered as a client of
// the segment by invoking getFuture.
Util.discard(index.getFuture(Locus.peek().execution, segment.getHeader()));
}
}
}
try {
segmentFutures.add(cacheMgr.sqlExecutor.submit(new SegmentLoadCommand(Locus.peek(), this, cellRequestCount, groupingSets, compoundPredicateList)));
} catch (Exception e) {
throw new MondrianException(e);
}
}
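A hedged usage sketch of the topological-order contract described above: the detailed grouping set must come first, followed by sets that roll up from it. All variable names here are hypothetical placeholders:
// Hypothetical call site honoring the ordering contract of load().
List<GroupingSet> groupingSets = new ArrayList<GroupingSet>();
groupingSets.add(detailedGroupingSet); // columns (A, B, C); must be first
groupingSets.add(rolledUpGroupingSet); // columns (B, C); rolls up from the first
List<Future<Map<Segment, SegmentWithData>>> segmentFutures = new ArrayList<Future<Map<Segment, SegmentWithData>>>();
segmentLoader.load(cellRequestCount, groupingSets, compoundPredicateList, segmentFutures);
// Once complete, each future yields the segments its SQL statement loaded.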
Use of mondrian.rolap.cache.SegmentCacheIndex in project mondrian by pentaho.
The class BatchLoader, method loadFromCaches.
/**
* Attempts to load a cell from the caches. Returns true if the cell
* was successfully loaded.
*/
private boolean loadFromCaches(final CellRequest request, final AggregationKey key, final SegmentBuilder.SegmentConverterImpl converter) {
if (MondrianProperties.instance().DisableCaching.get()) {
// Caching is disabled. Always return false.
return false;
}
// Is the request matched by one of the headers we intend to load?
final Map<String, Comparable> mappedCellValues = request.getMappedCellValues();
final List<String> compoundPredicates = request.getCompoundPredicateStrings();
for (SegmentHeader header : cacheHeaders) {
if (SegmentCacheIndexImpl.matches(header, mappedCellValues, compoundPredicates)) {
// The request is covered by a header we already intend to load.
// If its body later fails to arrive, the header will be removed
// from the segment index, and we'll be back.
return true;
}
}
final RolapStar.Measure measure = request.getMeasure();
final RolapStar star = measure.getStar();
final RolapSchema schema = star.getSchema();
final SegmentCacheIndex index = cacheMgr.getIndexRegistry().getIndex(star);
final List<SegmentHeader> headersInCache = index.locate(schema.getName(), schema.getChecksum(), measure.getCubeName(), measure.getName(), star.getFactTable().getAlias(), request.getConstrainedColumnsBitKey(), mappedCellValues, compoundPredicates);
if (!headersInCache.isEmpty()) {
for (SegmentHeader headerInCache : headersInCache) {
final Future<SegmentBody> future = index.getFuture(locus.execution, headerInCache);
if (future != null) {
// Segment header is in cache, body is being loaded.
// Worker will need to wait for load to complete.
futures.put(headerInCache, future);
} else {
// Segment is in cache.
cacheHeaders.add(headerInCache);
}
index.setConverter(headerInCache.schemaName, headerInCache.schemaChecksum, headerInCache.cubeName, headerInCache.rolapStarFactTableName, headerInCache.measureName, headerInCache.compoundPredicates, converter);
converterMap.put(SegmentCacheIndexImpl.makeConverterKey(request, key), converter);
}
return true;
}
// Rollup is only possible if the measure's aggregator supports
// fast aggregates; call
// Aggregator.supportsFastAggregates() to verify.
if (MondrianProperties.instance().EnableInMemoryRollup.get() && measure.getAggregator().supportsFastAggregates(measure.getDatatype()) && measure.getAggregator().getRollup().supportsFastAggregates(measure.getDatatype()) && !isRequestCoveredByRollups(request)) {
// Don't even bother doing a segment lookup if we can't
// roll up that measure.
final List<List<SegmentHeader>> rollup = index.findRollupCandidates(schema.getName(), schema.getChecksum(), measure.getCubeName(), measure.getName(), star.getFactTable().getAlias(), request.getConstrainedColumnsBitKey(), mappedCellValues, request.getCompoundPredicateStrings());
if (!rollup.isEmpty()) {
rollups.add(new RollupInfo(request, rollup));
rollupBitmaps.add(request.getConstrainedColumnsBitKey());
converterMap.put(SegmentCacheIndexImpl.makeConverterKey(request, key), new SegmentBuilder.StarSegmentConverter(measure, key.getCompoundPredicateList()));
return true;
}
}
return false;
}
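A sketch of how a caller might use the boolean result: only when no cache path applies (a header already queued for loading, a header found in the index, or a rollup candidate) does the request need a fresh SQL load. The surrounding batching logic is simplified and the scheduleSqlLoad helper is hypothetical:
// Hypothetical caller inside the batching logic.
if (!loadFromCaches(request, key, converter)) {
    // Nothing cached covers this cell; fall back to loading a new
    // segment via a SQL GROUP BY statement.
    scheduleSqlLoad(request, key); // hypothetical helper
}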
Use of mondrian.rolap.cache.SegmentCacheIndex in project mondrian by pentaho.
The class BatchLoader, method loadAggregations.
/**
* Resolves any pending cell reads using the cache. After calling this
* method, all cells requested in a given batch are loaded into this
* statement's local cache.
*
* <p>The method is implemented by making an asynchronous call to the cache
* manager. The result is a list of segments that satisfies every cell
* request.</p>
*
* <p>The client should put the resulting segments into its "query local"
* cache, to ensure that future cells in that segment can be answered
* without a call to the cache manager. (That is probably 1000x faster.)</p>
*
* <p>The cache manager does not inform the client where each segment
* came from. There are several possibilities:</p>
*
* <ul>
* <li>Segment was already in cache (header and body)</li>
* <li>Segment is in the process of being loaded by executing a SQL
* statement (probably due to a request from another client)</li>
* <li>Segment is in an external cache (that is, header is in the cache,
* body is not yet)</li>
* <li>Segment can be created by rolling up one or more cache segments.
* (And of course each of these segments might be "paged out".)</li>
* <li>Segment can be created by executing a SQL {@code GROUP BY}
* statement</li>
* </ul>
*
* <p>Furthermore, segments in an external cache may take some time to
* retrieve (a LAN round trip, say 1 millisecond, is a reasonable guess),
* and the request may fail. (It depends on the cache, but caches are at
* liberty to 'forget' segments.) So, any strategy that relies on cached
* segments should be able to fall back. Even with fallbacks, only one
* call needs to be made to the cache manager.</p>
*
* @return Whether any aggregations were loaded.
*/
boolean loadAggregations() {
if (!isDirty()) {
return false;
}
// List of futures yielding segments populated by SQL statements. If
// loading requires several iterations, we just append to the list. We
// don't mind if it takes a while for SQL statements to return.
final List<Future<Map<Segment, SegmentWithData>>> sqlSegmentMapFutures = new ArrayList<Future<Map<Segment, SegmentWithData>>>();
final List<CellRequest> cellRequests1 = new ArrayList<CellRequest>(cellRequests);
preloadColumnCardinality(cellRequests1);
for (int iteration = 0; ; ++iteration) {
final BatchLoader.LoadBatchResponse response = cacheMgr.execute(new BatchLoader.LoadBatchCommand(Locus.peek(), cacheMgr, getDialect(), cube, Collections.unmodifiableList(cellRequests1)));
int failureCount = 0;
// Segments that have been retrieved from cache this cycle. Allows
// us to reduce calls to the external cache.
Map<SegmentHeader, SegmentBody> headerBodies = new HashMap<SegmentHeader, SegmentBody>();
// Load the body of every segment the response said was in cache.
// All cache access must go through the
// cacheMgr -- it's our cache.
for (SegmentHeader header : response.cacheSegments) {
final SegmentBody body = cacheMgr.compositeCache.get(header);
if (body == null) {
// The cache contains the header but no longer the body. Count
// this as a failure; the header is removed from the index below,
// and a follow-up request will load
// it on the next iteration.
if (cube.getStar() != null) {
cacheMgr.remove(cube.getStar(), header);
}
++failureCount;
continue;
}
headerBodies.put(header, body);
final SegmentWithData segmentWithData = response.convert(header, body);
segmentWithData.getStar().register(segmentWithData);
}
// Perform each suggested rollup.
//
// TODO this could be improved.
// See http://jira.pentaho.com/browse/MONDRIAN-1195
// Rollups that succeeded. Will tell cache mgr to put the headers
// into the index and the header/bodies in cache.
final Map<SegmentHeader, SegmentBody> succeededRollups = new HashMap<SegmentHeader, SegmentBody>();
for (final BatchLoader.RollupInfo rollup : response.rollups) {
// Gather the required segments.
Map<SegmentHeader, SegmentBody> map = findResidentRollupCandidate(headerBodies, rollup);
if (map == null) {
// None of the candidate segment sets for this rollup was
// all present in the cache.
continue;
}
final Set<String> keepColumns = new HashSet<String>();
for (RolapStar.Column column : rollup.constrainedColumns) {
keepColumns.add(column.getExpression().getGenericExpression());
}
Pair<SegmentHeader, SegmentBody> rollupHeaderBody = SegmentBuilder.rollup(map, keepColumns, rollup.constrainedColumnsBitKey, rollup.measure.getAggregator().getRollup(), rollup.measure.getDatatype());
final SegmentHeader header = rollupHeaderBody.left;
final SegmentBody body = rollupHeaderBody.right;
if (headerBodies.containsKey(header)) {
// We had already created this segment, somehow.
continue;
}
headerBodies.put(header, body);
succeededRollups.put(header, body);
final SegmentWithData segmentWithData = response.convert(header, body);
// Register this segment with the local star.
segmentWithData.getStar().register(segmentWithData);
// Update the segment index, using the cache manager's
// Actor thread to ensure thread safety.
if (!MondrianProperties.instance().DisableCaching.get()) {
final Locus locus = Locus.peek();
cacheMgr.execute(new SegmentCacheManager.Command<Void>() {
public Void call() throws Exception {
SegmentCacheIndex index = cacheMgr.getIndexRegistry().getIndex(segmentWithData.getStar());
index.add(segmentWithData.getHeader(), response.converterMap.get(SegmentCacheIndexImpl.makeConverterKey(segmentWithData.getHeader())), true);
index.loadSucceeded(segmentWithData.getHeader(), body);
return null;
}
public Locus getLocus() {
return locus;
}
});
}
}
// Wait for SQL statements to end -- but only if there are no
// failures.
//
// If there are failures, and it's the first iteration, it's more
// urgent that we create and execute a follow-up request. We will
// wait for the pending SQL statements at the end of that.
//
// If there are failures on later iterations, wait for SQL
// statements to end. The cache might be porous. SQL might be the
// only way to make progress.
sqlSegmentMapFutures.addAll(response.sqlSegmentMapFutures);
if (failureCount == 0 || iteration > 0) {
// Wait on segments being loaded by someone else.
for (Map.Entry<SegmentHeader, Future<SegmentBody>> entry : response.futures.entrySet()) {
final SegmentHeader header = entry.getKey();
final Future<SegmentBody> bodyFuture = entry.getValue();
final SegmentBody body = Util.safeGet(bodyFuture, "Waiting for someone else's segment to load via SQL");
final SegmentWithData segmentWithData = response.convert(header, body);
segmentWithData.getStar().register(segmentWithData);
}
// Wait on segments being loaded by SQL statements we asked for.
for (Future<Map<Segment, SegmentWithData>> sqlSegmentMapFuture : sqlSegmentMapFutures) {
final Map<Segment, SegmentWithData> segmentMap = Util.safeGet(sqlSegmentMapFuture, "Waiting for segment to load via SQL");
for (SegmentWithData segmentWithData : segmentMap.values()) {
segmentWithData.getStar().register(segmentWithData);
}
// TODO: also pass back SegmentHeader and SegmentBody,
// and add these to headerBodies. Might help?
}
}
if (failureCount == 0) {
break;
}
// Figure out which cell requests are not satisfied by any of the
// segments retrieved.
@SuppressWarnings("unchecked") List<CellRequest> old = new ArrayList<CellRequest>(cellRequests1);
cellRequests1.clear();
for (CellRequest cellRequest : old) {
if (cellRequest.getMeasure().getStar().getCellFromCache(cellRequest, null) == null) {
cellRequests1.add(cellRequest);
}
}
if (cellRequests1.isEmpty()) {
break;
}
if (cellRequests1.size() >= old.size() && iteration > 10) {
throw Util.newError("Cache round-trip did not resolve any cell requests. " + "Iteration #" + iteration + "; request count " + cellRequests1.size() + "; requested headers: " + response.cacheSegments.size() + "; requested rollups: " + response.rollups.size() + "; requested SQL: " + response.sqlSegmentMapFutures.size());
}
// Continue loop; form and execute a new request with the smaller
// set of cell requests.
}
dirty = false;
cellRequests.clear();
return true;
}
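Distilled, loadAggregations is a retry-until-satisfied loop: ask the cache manager for a plan, apply it, and re-issue only the still-unsatisfied cell requests. A skeleton under hypothetical names (askCacheManager, applyResponse and unsatisfied stand in for the logic above):
// Skeleton of the retry loop. Each iteration should shrink the set
// of outstanding cell requests; a round trip that makes no progress
// after many iterations is reported as an error above.
List<CellRequest> pending = new ArrayList<CellRequest>(cellRequests);
for (int iteration = 0; ; ++iteration) {
    LoadBatchResponse response = askCacheManager(pending); // hypothetical
    int failureCount = applyResponse(response);            // hypothetical
    if (failureCount == 0) {
        break; // every request was satisfied this round
    }
    pending = unsatisfied(pending);                        // hypothetical
    if (pending.isEmpty()) {
        break; // the failed cells were resolved by other statements
    }
}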