Use of me.retrodaredevil.solarthing.packets.collection.InstancePacketGroup in project solarthing by wildmountainfarms.
The class PacketFinder, method updateWithRange:
private void updateWithRange(long queryStart, long queryEnd) {
	List<? extends InstancePacketGroup> rawPackets = simpleQueryHandler.queryStatus(queryStart, queryEnd, null);
	synchronized (cacheMap) {
		for (InstancePacketGroup instancePacketGroup : rawPackets) {
			int fragmentId = instancePacketGroup.getFragmentId();
			for (Packet packet : instancePacketGroup.getPackets()) {
				if (packet instanceof Identifiable) {
					Identifiable identifiable = (Identifiable) packet;
					IdentifierFragment packetIdentifierFragment = IdentifierFragment.create(fragmentId, identifiable.getIdentifier());
					cacheMap.putIfAbsent(packetIdentifierFragment, identifiable);
				}
			}
		}
	}
}
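The pattern here is a first-wins cache: each Identifiable packet is keyed by its fragment ID plus its identifier, and putIfAbsent keeps whichever entry was seen first. The standalone sketch below illustrates the same idea with hypothetical stand-in types (Identifier, Identifiable, IdentifierFragmentKey are placeholders, not SolarThing's classes) and a ConcurrentHashMap instead of the synchronized block used above.

// Minimal standalone sketch (not SolarThing code): cache the first Identifiable seen
// for each (fragmentId, identifier) pair, analogous to cacheMap.putIfAbsent above.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public final class IdentifiableCacheSketch {
	// Hypothetical stand-ins for SolarThing's Identifier/Identifiable types.
	record Identifier(String representation) {}
	record Identifiable(Identifier identifier, String data) {}
	// Composite key combining a fragment ID with a packet identifier.
	record IdentifierFragmentKey(int fragmentId, Identifier identifier) {}

	private final Map<IdentifierFragmentKey, Identifiable> cacheMap = new ConcurrentHashMap<>();

	/** Remembers the first Identifiable seen for each (fragmentId, identifier) pair. */
	public void cache(int fragmentId, Identifiable identifiable) {
		IdentifierFragmentKey key = new IdentifierFragmentKey(fragmentId, identifiable.identifier());
		cacheMap.putIfAbsent(key, identifiable); // later duplicates do not overwrite the first entry
	}

	public Identifiable find(int fragmentId, Identifier identifier) {
		return cacheMap.get(new IdentifierFragmentKey(fragmentId, identifier));
	}
}

The original synchronizes on a plain map rather than using a ConcurrentHashMap; either works, and the sketch only shows the composite-key and putIfAbsent idea.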
Use of me.retrodaredevil.solarthing.packets.collection.InstancePacketGroup in project solarthing by wildmountainfarms.
The class CacheHandler, method calculatePeriod:
private List<CacheDataPacket> calculatePeriod(long startPeriodNumber, long endPeriodNumber) {
	Instant firstPeriodStart = getPeriodStartFromNumber(startPeriodNumber);
	Instant lastPeriodEnd = getPeriodStartFromNumber(endPeriodNumber).plus(duration);
	Instant queryStart = firstPeriodStart.minus(INFO_DURATION);
	MillisQuery millisQuery = new MillisQueryBuilder().startKey(queryStart.toEpochMilli()).endKey(lastPeriodEnd.toEpochMilli()).inclusiveEnd(false).build();
	final List<? extends PacketGroup> packetGroups;
	try {
		packetGroups = database.getStatusDatabase().query(millisQuery);
	} catch (SolarThingDatabaseException e) {
		// The consumers of this API may be ok if there are holes in the data rather than getting no data at all, so maybe change this later?
		throw new DatabaseException("Couldn't query status packets for period. startPeriodNumber: " + startPeriodNumber + " endPeriodNumber: " + endPeriodNumber + " firstPeriodStart: " + firstPeriodStart, e);
	}
	List<CacheDataPacket> r = new ArrayList<>();
	Map<String, List<InstancePacketGroup>> sourceMap = PacketGroups.parsePackets(packetGroups, defaultInstanceOptions);
	for (Map.Entry<String, List<InstancePacketGroup>> entry : sourceMap.entrySet()) {
		String sourceId = entry.getKey();
		List<InstancePacketGroup> packets = entry.getValue();
		for (long periodNumber = startPeriodNumber; periodNumber <= endPeriodNumber; periodNumber++) {
			Instant periodStart = getPeriodStartFromNumber(periodNumber);
			for (CacheCreator creator : CACHE_CREATORS) {
				r.add(creator.createFrom(sourceId, packets, periodStart, duration));
			}
		}
	}
	return r;
}
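calculatePeriod works in terms of period numbers, and getPeriodStartFromNumber (not shown here) maps a period number back to an Instant. As a rough illustration only, the sketch below assumes fixed-length periods counted from the Unix epoch; the actual mapping used by CacheHandler may differ.

// Sketch of the period arithmetic, assuming (not shown above) that period N starts at epoch + N * duration.
import java.time.Duration;
import java.time.Instant;

public final class PeriodMathSketch {
	private final Duration duration = Duration.ofHours(1); // example period length

	Instant getPeriodStartFromNumber(long periodNumber) {
		return Instant.EPOCH.plus(duration.multipliedBy(periodNumber));
	}

	long getPeriodNumber(Instant instant) {
		return Math.floorDiv(instant.toEpochMilli(), duration.toMillis());
	}

	public static void main(String[] args) {
		PeriodMathSketch sketch = new PeriodMathSketch();
		long periodNumber = sketch.getPeriodNumber(Instant.parse("2021-06-01T10:30:00Z"));
		Instant periodStart = sketch.getPeriodStartFromNumber(periodNumber); // 2021-06-01T10:00:00Z
		Instant periodEnd = periodStart.plus(sketch.duration); // exclusive end, matching inclusiveEnd(false) above
		System.out.println(periodStart + " .. " + periodEnd);
	}
}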
Use of me.retrodaredevil.solarthing.packets.collection.InstancePacketGroup in project solarthing by wildmountainfarms.
The class InfluxDbPacketSaver, method handle:
@Override
public void handle(PacketCollection packetCollection) throws PacketHandleException {
	try (InfluxDB db = createDatabase()) {
		final InstancePacketGroup packetGroup = PacketGroups.parseToInstancePacketGroup(packetCollection, DefaultInstanceOptions.REQUIRE_NO_DEFAULTS);
		DefaultInstanceOptions.requireNoDefaults(packetGroup);
		final String database = databaseNameGetter.getName(packetGroup);
		try {
			QueryResult result = db.query(new Query("CREATE DATABASE " + database, null, true));
			String error = getError(result);
			if (error != null) {
				throw new PacketHandleException("Result got error! error: " + result);
			}
		} catch (InfluxDBException ex) {
			throw new PacketHandleException("Unable to query the database!", ex);
		}
		final RetentionPolicySetting retentionPolicySetting = retentionPolicyGetter.getRetentionPolicySetting();
		final String retentionPolicyName;
		// region Retention Policy Creation Logic
		if (retentionPolicySetting != null) {
			retentionPolicyName = retentionPolicySetting.getName();
			if (retentionPolicyName != null) {
				final RetentionPolicy policy = retentionPolicySetting.getRetentionPolicy();
				if (policy != null) {
					final String policyString = policy.toPolicyStringInfluxDb1(retentionPolicyName, database);
					final boolean needsAlter;
					if (retentionPolicySetting.isTryToCreate()) {
						final QueryResult result;
						final String query = "CREATE " + policyString;
						try {
							result = db.query(new Query(query, null, true));
						} catch (InfluxDBException ex) {
							throw new PacketHandleException("Unable to query database to create retention policy: " + retentionPolicyName + " query: " + query, ex);
						}
						String error = getError(result);
						if (retentionPolicySetting.isIgnoreUnsuccessfulCreate()) {
							if (error != null) {
								LOGGER.debug("We're going to ignore this error we got while trying to create a retention policy. Error: {}", error);
							}
							needsAlter = false;
						} else {
							if (error != null) {
								LOGGER.debug("Got error while trying to create! Error: " + error);
							}
							needsAlter = error != null;
						}
						if (needsAlter && !retentionPolicySetting.isAutomaticallyAlter()) {
							throw new PacketHandleException("Got error while trying to create retention policy: " + retentionPolicyName + ". Error: " + error);
						}
					} else {
						needsAlter = true;
					}
					if (needsAlter) {
						if (retentionPolicySetting.isAutomaticallyAlter()) {
							final QueryResult alterResult;
							try {
								alterResult = db.query(new Query("ALTER " + policyString));
								LOGGER.info(SolarThingConstants.SUMMARY_MARKER, "Successfully altered {} retention policy!", retentionPolicyName);
							} catch (InfluxDBException ex) {
								throw new PacketHandleException("Unable to query database to alter retention policy: " + retentionPolicyName, ex);
							}
							String error = getError(alterResult);
							if (error != null) {
								throw new PacketHandleException("Unable to alter retention policy: " + retentionPolicyName + ". Error: " + error);
							}
						} else {
							throw new PacketHandleException("Retention policy: " + retentionPolicyName + " needs to be altered but automatically alter is false!");
						}
					}
				}
			}
		} else {
			retentionPolicyName = null;
		}
		// endregion
		final long time = packetCollection.getDateMillis();
		final BatchPoints points = BatchPoints.database(database)
				.tag("sourceId", packetGroup.getSourceId())
				.tag("fragmentId", "" + packetGroup.getFragmentId())
				.consistency(InfluxDB.ConsistencyLevel.ALL)
				.retentionPolicy(retentionPolicyName) // may be null, but that's OK
				.build();
		int packetsWritten = 0;
		for (Packet packet : packetGroup.getPackets()) {
			Point.Builder pointBuilder = pointCreator.createBuilder(packet).time(time, TimeUnit.MILLISECONDS);
			Collection<String> tagKeys = PointUtil.getTagKeys(packet.getClass());
			ObjectNode json = OBJECT_MAPPER.valueToTree(packet);
			for (Map.Entry<String, ValueNode> entry : PointUtil.flattenJsonObject(json)) {
				String key = entry.getKey();
				ValueNode prim = entry.getValue();
				if (tagKeys.contains(key)) {
					pointBuilder.tag(key, prim.asText());
				}
				if (prim.isNumber()) {
					// always store as float datatype
					pointBuilder.addField(key, prim.asDouble());
				} else if (prim.isTextual() || prim.isBinary()) {
					pointBuilder.addField(key, prim.asText());
				} else if (prim.isBoolean()) {
					pointBuilder.addField(key, prim.asBoolean());
				} else {
					throw new AssertionError("This primitive isn't a number, string/binary or boolean! It's: " + prim + " class: " + prim.getClass() + " text=" + prim.asText());
				}
			}
			points.point(pointBuilder.build());
			packetsWritten++;
		}
		try {
			db.write(points);
		} catch (InfluxDBException ex) {
			throw new PacketHandleException("We were able to query the database, but unable to write the points to it!", ex);
		}
		LOGGER.debug("Wrote {} packets to InfluxDB! database={} retention policy={}", packetsWritten, database, retentionPolicyName);
	}
}
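The inner loop relies on PointUtil.flattenJsonObject to turn the packet's JSON tree into flat (key, ValueNode) pairs. Its implementation is not shown on this page, so the sketch below is only an assumption about what such a helper could look like: nested objects are flattened into dot-separated keys and scalar leaves are collected.

// Hedged sketch (assumption, not PointUtil's actual code): flatten a Jackson ObjectNode into
// (dot-separated key, ValueNode) entries so scalar leaves can be written as InfluxDB fields/tags.
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ValueNode;

public final class FlattenSketch {
	public static List<Map.Entry<String, ValueNode>> flattenJsonObject(ObjectNode node) {
		List<Map.Entry<String, ValueNode>> result = new ArrayList<>();
		flatten("", node, result);
		return result;
	}

	private static void flatten(String prefix, ObjectNode node, List<Map.Entry<String, ValueNode>> result) {
		for (Iterator<Map.Entry<String, JsonNode>> it = node.fields(); it.hasNext(); ) {
			Map.Entry<String, JsonNode> entry = it.next();
			String key = prefix.isEmpty() ? entry.getKey() : prefix + "." + entry.getKey();
			JsonNode value = entry.getValue();
			if (value.isObject()) {
				flatten(key, (ObjectNode) value, result); // recurse into nested objects
			} else if (value instanceof ValueNode) {
				result.add(new SimpleImmutableEntry<>(key, (ValueNode) value)); // scalar leaf
			}
			// arrays are skipped in this sketch; the real PointUtil may handle them differently
		}
	}
}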
Use of me.retrodaredevil.solarthing.packets.collection.InstancePacketGroup in project solarthing by wildmountainfarms.
The class InfluxDb2PacketSaver, method handle:
@Override
public void handle(PacketCollection packetCollection) throws PacketHandleException {
	final InstancePacketGroup packetGroup = PacketGroups.parseToInstancePacketGroup(packetCollection, DefaultInstanceOptions.REQUIRE_NO_DEFAULTS);
	DefaultInstanceOptions.requireNoDefaults(packetGroup);
	Organization organization = findOrCreateOrg();
	Bucket bucket = findOrCreateBucket(bucketNameGetter.getName(packetGroup), organization);
	final long time = packetCollection.getDateMillis();
	List<Point> points = new ArrayList<>();
	for (Packet packet : packetGroup.getPackets()) {
		Point point = pointCreator.createBuilder(packet).time(time, WritePrecision.MS);
		Collection<String> tagKeys = PointUtil.getTagKeys(packet.getClass());
		ObjectNode json = OBJECT_MAPPER.valueToTree(packet);
		for (Map.Entry<String, ValueNode> entry : PointUtil.flattenJsonObject(json)) {
			String key = entry.getKey();
			ValueNode prim = entry.getValue();
			if (tagKeys.contains(key)) {
				point.addTag(key, prim.asText());
			}
			if (prim.isNumber()) {
				// always store as float datatype because you can never change the type from int to float easily
				final Number value;
				if (prim.isBigDecimal()) {
					DecimalNode decimal = (DecimalNode) prim;
					value = decimal.decimalValue();
				} else {
					value = prim.asDouble();
				}
				point.addField(key, value);
			} else if (prim.isTextual() || prim.isBinary()) {
				point.addField(key, prim.asText());
			} else if (prim.isBoolean()) {
				point.addField(key, prim.asBoolean());
			} else {
				throw new AssertionError("This primitive isn't a number, string/binary or boolean! It's: " + prim + " class: " + prim.getClass() + " text=" + prim.asText());
			}
		}
		points.add(point);
	}
	try {
		client.getWriteApiBlocking().writePoints(bucket.getName(), bucket.getOrgID(), points);
	} catch (InfluxException exception) {
		throw new PacketHandleException("Could not write points", exception);
	}
}
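Both savers coerce every numeric JSON value to a floating-point field so the InfluxDB field type never has to change from integer to float later; the InfluxDB 2 version additionally preserves BigDecimal-backed values at full precision. The sketch below isolates just that decision using only Jackson types; toFieldNumber is a hypothetical helper, not part of SolarThing.

// Standalone sketch of the number-coercion rule above, using only Jackson types (no InfluxDB client):
// every numeric JSON value becomes a floating-point Number so the field type never switches from int to float.
import java.math.BigDecimal;

import com.fasterxml.jackson.databind.node.DecimalNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.ValueNode;

public final class FieldValueSketch {
	/** Returns the value to store for a numeric node, or null if the node is not a number. */
	public static Number toFieldNumber(ValueNode prim) {
		if (!prim.isNumber()) {
			return null;
		}
		if (prim.isBigDecimal()) {
			// keep full precision for BigDecimal-backed nodes instead of collapsing to double
			return ((DecimalNode) prim).decimalValue();
		}
		return prim.asDouble(); // ints, longs, floats, doubles are all stored as double
	}

	public static void main(String[] args) {
		System.out.println(toFieldNumber(IntNode.valueOf(42))); // 42.0
		System.out.println(toFieldNumber(DecimalNode.valueOf(new BigDecimal("12.3456789012345678")))); // exact decimal
	}
}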