Use of me.retrodaredevil.solarthing.rest.exceptions.DatabaseException in project solarthing by wildmountainfarms.
From the class CacheHandler, method queryOrCalculateCaches.
private <T extends CacheDataPacket> List<T> queryOrCalculateCaches(TypeReference<T> typeReference, String cacheName, String sourceId, long startPeriodNumber, long endPeriodNumber) {
	// the document IDs needed to return data
	List<String> documentIds = new ArrayList<>();
	// a map from a document ID to a period number
	Map<String, Long> documentIdPeriodNumberMap = new HashMap<>();
	for (long periodNumber = startPeriodNumber; periodNumber <= endPeriodNumber; periodNumber++) {
		Instant periodStart = getPeriodStartFromNumber(periodNumber);
		String documentId = CacheUtil.getDocumentId(periodStart, duration, sourceId, cacheName);
		documentIds.add(documentId);
		documentIdPeriodNumberMap.put(documentId, periodNumber);
	}
	BulkGetRequest request = BulkGetRequest.from(documentIds);
	final BulkGetResponse response;
	try {
		response = cacheDatabase.getDocumentsBulk(request);
	} catch (CouchDbException e) {
		throw new DatabaseException("CouchDB exception | message: " + e.getMessage(), e);
	}
	// map for documents that need to be updated. The value represents the revision that needs to be used to update it
	Map<String, String> documentIdRevisionMapForUpdate = new HashMap<>();
	// map from period number -> cached data. This helps us make sure we only return a single piece of data for each period
	Map<Long, T> periodNumberPacketMap = new TreeMap<>();
	// set of document IDs that we already have and do not need to be updated
	Set<String> doNotUpdateDocumentIdsSet = new HashSet<>();
	Long queryStartPeriodNumber = null;
	Long queryEndPeriodNumber = null;
	for (BulkGetResponse.Result result : response.getResults()) {
		if (result.hasConflicts()) {
			// Conflicts are unexpected here; they would likely indicate someone has replication going on with their databases.
			throw new UnexpectedResponseException("cache document with conflict! doc id: " + result.getDocumentId());
		}
		Long periodNumber = documentIdPeriodNumberMap.get(result.getDocumentId());
		if (periodNumber == null) {
			throw new IllegalStateException("Could not get period number for doc id: " + result.getDocumentId() + ". This should never happen.");
		}
		T value = null;
		if (!result.isError()) {
			JsonData jsonData = result.getJsonDataAssertNotConflicted();
			try {
				value = CouchDbJacksonUtil.readValue(mapper, jsonData, typeReference);
				if (value.getSourceId().equals(sourceId) && value.getCacheName().equals(cacheName)) {
					periodNumberPacketMap.put(periodNumber, value);
				}
				doNotUpdateDocumentIdsSet.add(value.getDbId());
			} catch (JsonProcessingException ex) {
				// If we are in this catch block, one of a few things has happened:
				// the JSON is invalid (unlikely), we updated how a given cache is serialized/deserialized, or we never tested that the deserialization works.
				// If the JSON is actually invalid, getRevisionFromJsonData will throw an exception. Otherwise, we need to calculate the given cache again.
				String revision = getRevisionFromJsonData(jsonData);
				documentIdRevisionMapForUpdate.put(result.getDocumentId(), revision);
			}
		}
		if (value == null) {
			if (queryStartPeriodNumber == null) {
				queryStartPeriodNumber = periodNumber;
				queryEndPeriodNumber = periodNumber;
			} else {
				queryStartPeriodNumber = Math.min(queryStartPeriodNumber, periodNumber);
				queryEndPeriodNumber = Math.max(queryEndPeriodNumber, periodNumber);
			}
		}
	}
	if (queryStartPeriodNumber != null) {
		List<CacheDataPacket> calculatedPackets = calculatePeriod(queryStartPeriodNumber, queryEndPeriodNumber);
		List<JsonData> calculatedPacketsJsonDataList = new ArrayList<>();
		int updateAttemptCount = 0;
		for (CacheDataPacket packet : calculatedPackets) {
			if (doNotUpdateDocumentIdsSet.contains(packet.getDbId())) {
				continue;
			}
			if (!sourceId.equals(packet.getSourceId()) || !cacheName.equals(packet.getCacheName())) {
				// This packet's source ID or cache name differs from the ones we queried, so we don't know which of these need to be updated and which cannot be.
				continue;
			}
			JsonData json;
			try {
				String revision = documentIdRevisionMapForUpdate.get(packet.getDbId());
				if (revision == null) {
					json = new StringJsonData(mapper.writeValueAsString(packet));
				} else {
					json = new StringJsonData(mapper.writeValueAsString(new DocumentRevisionWrapper(revision, packet)));
					updateAttemptCount++;
				}
			} catch (JsonProcessingException e) {
				throw new RuntimeException("Should be able to serialize!", e);
			}
			calculatedPacketsJsonDataList.add(json);
		}
		final List<BulkDocumentResponse> postResponse;
		try {
			postResponse = cacheDatabase.postDocumentsBulk(new BulkPostRequest(calculatedPacketsJsonDataList));
		} catch (CouchDbException e) {
			throw new DatabaseException("Could not update cache", e);
		}
		int successCount = 0;
		int failCount = 0;
		for (BulkDocumentResponse documentResponse : postResponse) {
			if (documentResponse.isOk()) {
				successCount++;
			} else {
				failCount++;
				LOGGER.info("Error: " + documentResponse.getError() + " reason: " + documentResponse.getReason() + " on id: " + documentResponse.getId());
			}
		}
		LOGGER.debug("(Cache updating) Success: " + successCount + " fail: " + failCount + ". Tried to update: " + updateAttemptCount);
		int numberOfWantedType = 0;
		for (CacheDataPacket cacheDataPacket : calculatedPackets) {
			if (cacheDataPacket.getSourceId().equals(sourceId) && cacheDataPacket.getCacheName().equals(cacheName)) {
				@SuppressWarnings("unchecked") T packet = (T) cacheDataPacket;
				Long periodNumber = documentIdPeriodNumberMap.get(cacheDataPacket.getDbId());
				if (periodNumber == null) {
					throw new NullPointerException("No period number for id: " + cacheDataPacket.getDbId());
				}
				periodNumberPacketMap.put(periodNumber, packet);
				numberOfWantedType++;
			}
		}
		LOGGER.debug("Calculated " + calculatedPackets.size() + " and " + numberOfWantedType + " were of type " + cacheName);
	} else {
		LOGGER.trace("Didn't have to get any data");
	}
	return new ArrayList<>(periodNumberPacketMap.values());
}
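The method above is a read-through cache: one CouchDB document per period is fetched in bulk, and only the periods that are missing or unreadable get recalculated and written back (carrying the existing revision when overwriting an existing document). A hypothetical call site, sketched to show how the type parameter, cache name, and period range fit together; the packet class and CACHE_NAME constant used here are assumptions for illustration, not taken from this listing:

// Hypothetical usage; the packet type and cache name are illustrative assumptions.
List<ChargeControllerAccumulationDataCache> packets = queryOrCalculateCaches(
		new TypeReference<ChargeControllerAccumulationDataCache>() {},
		ChargeControllerAccumulationDataCache.CACHE_NAME, // assumed constant
		"default", // source ID
		startPeriodNumber,
		endPeriodNumber);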
Use of me.retrodaredevil.solarthing.rest.exceptions.DatabaseException in project solarthing by wildmountainfarms.
From the class SimpleQueryHandler, method queryMeta.
public MetaDatabase queryMeta() {
	final VersionedPacket<RootMetaPacket> metadata;
	synchronized (this) {
		final VersionedPacket<RootMetaPacket> currentCache = metadataCache;
		final Long lastMetadataCacheNanos = this.lastMetadataCacheNanos;
		if (lastMetadataCacheNanos != null && System.nanoTime() - lastMetadataCacheNanos < METADATA_CACHE_VALID.toNanos()) {
			requireNonNull(currentCache);
			metadata = currentCache;
		} else {
			UpdateToken updateToken = currentCache == null ? null : currentCache.getUpdateToken();
			final VersionedPacket<RootMetaPacket> newMetadata;
			try {
				newMetadata = database.queryMetadata(updateToken);
			} catch (NotFoundSolarThingDatabaseException e) {
				// If we have not defined metadata, then we return an "empty" instance
				return EmptyMetaDatabase.getInstance();
			} catch (SolarThingDatabaseException e) {
				throw new DatabaseException("Could not query meta", e);
			}
			this.lastMetadataCacheNanos = System.nanoTime();
			if (newMetadata == null) {
				requireNonNull(currentCache);
				metadata = currentCache;
			} else {
				metadataCache = newMetadata;
				metadata = newMetadata;
			}
		}
	}
	return new DefaultMetaDatabase(metadata.getPacket());
}
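Two details in queryMeta generalize well: elapsed time is measured with System.nanoTime(), which is monotonic and therefore immune to wall-clock adjustments, and a null result from database.queryMetadata(updateToken) is treated as "unchanged since that token", so the cached packet is reused. A stripped-down sketch of the same time-based validity check, with illustrative names and an assumed five-minute TTL:

import java.time.Duration;
import java.util.function.Supplier;

// Minimal sketch of the nanoTime-based validity check used above; names are illustrative.
final class TimedCache<T> {
	private static final Duration CACHE_VALID = Duration.ofMinutes(5); // assumed TTL

	private Long lastCacheNanos; // null until the first successful fetch
	private T cachedValue;

	synchronized T get(Supplier<T> fetcher) {
		// nanoTime is monotonic, so this stays correct even if the wall clock is adjusted
		if (lastCacheNanos != null && System.nanoTime() - lastCacheNanos < CACHE_VALID.toNanos()) {
			return cachedValue;
		}
		cachedValue = fetcher.get();
		lastCacheNanos = System.nanoTime();
		return cachedValue;
	}
}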
Use of me.retrodaredevil.solarthing.rest.exceptions.DatabaseException in project solarthing by wildmountainfarms.
From the class SimpleQueryHandler, method queryPackets.
/**
 * @param from The date millis from
 * @param to The date millis to
 * @param sourceId The source ID or null. If null, the returned List may contain packet groups from multiple sources
 * @return The resulting packets
 */
private List<? extends InstancePacketGroup> queryPackets(MillisDatabase database, long from, long to, String sourceId) {
	MillisQuery millisQuery = new MillisQueryBuilder().startKey(from).endKey(to).build();
	UniqueQuery uniqueQuery = new UniqueQuery(database, millisQuery);
	final Future<? extends List<? extends PacketGroup>> future;
	{
		// Many times a Grafana dashboard will make many graphql requests with the same from and to parameters.
		// Without this code, each graphql request would result in a separate request to the database.
		// Most of the time, these requests are being executed at the same time.
		// This piece of code takes advantage of the fact that we are requesting the same data at the same time.
		// If we find a Future that is already executing for a given query, we wait for that to complete instead of performing a separate request.
		// This is sort of a caching mechanism, but it's a VERY temporary caching mechanism since data is not kept after it is queried.
		executingQueryMutex.lock();
		var currentFuture = executingQueryMap.get(uniqueQuery);
		if (currentFuture != null) {
			future = currentFuture;
			executingQueryMutex.unlock();
		} else {
			RunnableFuture<? extends List<? extends PacketGroup>> runnableFuture = new FutureTask<>(() -> {
				try {
					return database.query(millisQuery);
				} catch (SolarThingDatabaseException e) {
					throw new DatabaseException("Exception querying from " + from + " to " + to, e);
				}
			});
			executingQueryMap.put(uniqueQuery, runnableFuture);
			executingQueryMutex.unlock();
			runnableFuture.run();
			future = runnableFuture;
			executingQueryMutex.lock();
			executingQueryMap.remove(uniqueQuery);
			executingQueryMutex.unlock();
		}
	}
	final List<? extends PacketGroup> rawPacketGroups;
	try {
		rawPacketGroups = future.get();
	} catch (InterruptedException e) {
		throw new DatabaseException("Interrupted!", e);
	} catch (ExecutionException e) {
		Throwable cause = e.getCause();
		if (cause instanceof RuntimeException) {
			throw (RuntimeException) cause;
		}
		throw new DatabaseException("Unknown execution exception", e);
	}
	if (rawPacketGroups.isEmpty()) {
		if (to - from > 60 * 1000) {
			// Only log this message if the requester is actually asking for a decent chunk of data
			LOGGER.debug("No packets were queried between " + from + " and " + to);
		}
		return Collections.emptyList();
	}
	if (sourceId == null) {
		return PacketGroups.parseToInstancePacketGroups(rawPacketGroups, defaultInstanceOptions);
	}
	Map<String, List<InstancePacketGroup>> map = PacketGroups.parsePackets(rawPacketGroups, defaultInstanceOptions);
	if (map.containsKey(sourceId)) {
		List<InstancePacketGroup> instancePacketGroupList = map.get(sourceId);
		return PacketGroups.orderByFragment(instancePacketGroupList);
	}
	throw new NoSuchElementException("No element with sourceId: '" + sourceId + "' available keys are: " + map.keySet());
}
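The block above deduplicates concurrent identical queries: the first thread to register a FutureTask for a given UniqueQuery runs it on its own thread, and any thread that finds an existing task simply waits on it. A minimal sketch of that pattern, using a ConcurrentHashMap in place of the explicit lock; all names here are illustrative:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.FutureTask;
import java.util.function.Supplier;

// Minimal sketch of the request-deduplication pattern above; names are illustrative.
final class QueryDeduplicator<K, V> {
	private final Map<K, FutureTask<V>> executing = new ConcurrentHashMap<>();

	V query(K key, Supplier<V> actualQuery) throws InterruptedException, ExecutionException {
		FutureTask<V> task = new FutureTask<>(actualQuery::get);
		FutureTask<V> existing = executing.putIfAbsent(key, task);
		if (existing != null) {
			return existing.get(); // piggyback on the in-flight query
		}
		try {
			task.run(); // we won the race, so we do the real work
			return task.get();
		} finally {
			// like the original, nothing is cached once the query completes
			executing.remove(key);
		}
	}
}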
Use of me.retrodaredevil.solarthing.rest.exceptions.DatabaseException in project solarthing by wildmountainfarms.
From the class SolarThingExceptionHandler, method onException.
/*
	It's worth noting that SimpleDataFetcherExceptionHandler prepends "notprivacysafe." to the full class name for its logger.
	We aren't going to do that here, as there's no private information, but the difference is worth noting.
*/
@Override
public DataFetcherExceptionHandlerResult onException(DataFetcherExceptionHandlerParameters handlerParameters) {
	Throwable exception = handlerParameters.getException();
	SourceLocation sourceLocation = handlerParameters.getSourceLocation();
	ResultPath path = handlerParameters.getPath();
	if (exception instanceof DatabaseException) {
		// this is the most common exception, usually caused by a timeout
		LOGGER.info("Got database exception", exception);
	} else {
		LOGGER.warn("Got uncommon exception", exception);
	}
	ExceptionWhileDataFetching error = new ExceptionWhileDataFetching(path, exception, sourceLocation);
	return DataFetcherExceptionHandlerResult.newResult().error(error).build();
}
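For context, a handler like this is typically installed when building the GraphQL instance. A sketch, assuming graphql-java's GraphQL.Builder still exposes defaultDataFetcherExceptionHandler in the version in use (newer releases deprecate it in favor of per-field handlers):

// Hypothetical wiring; assumes this builder method is available in the graphql-java version in use.
GraphQL graphQL = GraphQL.newGraphQL(schema)
		.defaultDataFetcherExceptionHandler(new SolarThingExceptionHandler())
		.build();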
Use of me.retrodaredevil.solarthing.rest.exceptions.DatabaseException in project solarthing by wildmountainfarms.
From the class CacheHandler, method calculatePeriod.
private List<CacheDataPacket> calculatePeriod(long startPeriodNumber, long endPeriodNumber) {
	Instant firstPeriodStart = getPeriodStartFromNumber(startPeriodNumber);
	Instant lastPeriodEnd = getPeriodStartFromNumber(endPeriodNumber).plus(duration);
	Instant queryStart = firstPeriodStart.minus(INFO_DURATION);
	MillisQuery millisQuery = new MillisQueryBuilder().startKey(queryStart.toEpochMilli()).endKey(lastPeriodEnd.toEpochMilli()).inclusiveEnd(false).build();
	final List<? extends PacketGroup> packetGroups;
	try {
		packetGroups = database.getStatusDatabase().query(millisQuery);
	} catch (SolarThingDatabaseException e) {
		// The consumers of this API may be ok if there are holes in the data rather than getting no data at all, so maybe change this later?
		throw new DatabaseException("Couldn't query status packets for period. startPeriodNumber: " + startPeriodNumber + " endPeriodNumber: " + endPeriodNumber + " firstPeriodStart: " + firstPeriodStart, e);
	}
	List<CacheDataPacket> r = new ArrayList<>();
	Map<String, List<InstancePacketGroup>> sourceMap = PacketGroups.parsePackets(packetGroups, defaultInstanceOptions);
	for (Map.Entry<String, List<InstancePacketGroup>> entry : sourceMap.entrySet()) {
		String sourceId = entry.getKey();
		List<InstancePacketGroup> packets = entry.getValue();
		for (long periodNumber = startPeriodNumber; periodNumber <= endPeriodNumber; periodNumber++) {
			Instant periodStart = getPeriodStartFromNumber(periodNumber);
			for (CacheCreator creator : CACHE_CREATORS) {
				r.add(creator.createFrom(sourceId, packets, periodStart, duration));
			}
		}
	}
	return r;
}
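getPeriodStartFromNumber itself is not shown in this listing. A plausible sketch, assuming periods are fixed-size windows counted from the Unix epoch, which would make the period number and the period start mutually derivable:

import java.time.Duration;
import java.time.Instant;

// Plausible sketch of the period-number arithmetic, assuming fixed-size windows
// counted from the Unix epoch; the real implementation is not shown in this listing.
final class Periods {
	static Instant getPeriodStartFromNumber(long periodNumber, Duration duration) {
		return Instant.ofEpochMilli(periodNumber * duration.toMillis());
	}

	static long getPeriodNumberFromTime(Instant time, Duration duration) {
		return time.toEpochMilli() / duration.toMillis();
	}
}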