Use of org.openremote.model.attribute.AttributeExecuteStatus in project openremote by openremote.
The configure() method of the AssetProcessingService class.
@Override
public void configure() throws Exception {
    // A client wants to write attribute state through event bus
    from(CLIENT_EVENT_TOPIC)
        .routeId("FromClientUpdates")
        .filter(body().isInstanceOf(AttributeEvent.class))
        .setHeader(HEADER_SOURCE, () -> CLIENT)
        .to(ASSET_QUEUE);

    // Process attribute events
    /* TODO This message consumer should be transactionally consistent with the database, this is currently not the case
       Our "if I have not processed this message before" duplicate detection:
       - discard events with source time greater than server processing time (future events)
       - discard events with source time less than last applied/stored event source time
       - allow the rest (also events with same source time, order of application undefined)
       Possible improvements moving towards at-least-once:
       - Make AssetUpdateProcessor transactional with a two-phase commit API
       - Replace at-most-once ClientEventService with at-least-once capable, embeddable message broker/protocol
       - See pseudocode here: http://activemq.apache.org/should-i-use-xa.html
       - Do we want JMS/AMQP/WSS or SOME_API/MQTT/WSS? ActiveMQ or Moquette?
    */
    from(ASSET_QUEUE)
        .routeId("AssetQueueProcessor")
        .filter(body().isInstanceOf(AttributeEvent.class))
        .doTry()
        .process(exchange -> withLock(getClass().getSimpleName() + "::processFromAssetQueue", () -> {
            AttributeEvent event = exchange.getIn().getBody(AttributeEvent.class);
            LOG.finest("Processing: " + event);

            if (event.getEntityId() == null || event.getEntityId().isEmpty())
                return;
            if (event.getAttributeName() == null || event.getAttributeName().isEmpty())
                return;

            Source source = exchange.getIn().getHeader(HEADER_SOURCE, () -> null, Source.class);
            if (source == null) {
                throw new AssetProcessingException(MISSING_SOURCE);
            }

            // Process the asset update in a database transaction, this ensures that processors
            // will see consistent database state and we only commit if no processor failed. This
            // still won't make this procedure consistent with the message queue from which we consume!
            persistenceService.doTransaction(em -> {
                ServerAsset asset = assetStorageService.find(em, event.getEntityId(), true);
                if (asset == null)
                    throw new AssetProcessingException(ASSET_NOT_FOUND);

                AssetAttribute oldAttribute = asset.getAttribute(event.getAttributeName()).orElse(null);
                if (oldAttribute == null)
                    throw new AssetProcessingException(ATTRIBUTE_NOT_FOUND);

                // Agent attributes can't be updated with events
                if (asset.getWellKnownType() == AssetType.AGENT) {
                    throw new AssetProcessingException(ILLEGAL_AGENT_UPDATE);
                }

                // For executable attributes, non-sensor sources can set a writable attribute execute status
                if (oldAttribute.isExecutable() && source != SENSOR) {
                    Optional<AttributeExecuteStatus> status = event.getValue()
                        .flatMap(Values::getString)
                        .flatMap(AttributeExecuteStatus::fromString);
                    if (status.isPresent() && !status.get().isWrite()) {
                        throw new AssetProcessingException(INVALID_ATTRIBUTE_EXECUTE_STATUS);
                    }
                }

                switch (source) {
                    case CLIENT:
                        AuthContext authContext = exchange.getIn().getHeader(Constants.AUTH_CONTEXT, AuthContext.class);
                        if (authContext == null) {
                            throw new AssetProcessingException(NO_AUTH_CONTEXT);
                        }
                        // Check realm, must be accessible
                        if (!identityService.getIdentityProvider().isTenantActiveAndAccessible(authContext, asset)) {
                            throw new AssetProcessingException(INSUFFICIENT_ACCESS);
                        }
                        // Check read-only
                        if (oldAttribute.isReadOnly() && !authContext.isSuperUser()) {
                            throw new AssetProcessingException(INSUFFICIENT_ACCESS);
                        }
                        // Regular user must have write assets role
                        if (!authContext.hasResourceRoleOrIsSuperUser(ClientRole.WRITE_ASSETS.getValue(), Constants.KEYCLOAK_CLIENT_ID)) {
                            throw new AssetProcessingException(INSUFFICIENT_ACCESS);
                        }
                        // Check restricted user
                        if (identityService.getIdentityProvider().isRestrictedUser(authContext.getUserId())) {
                            // Must be asset linked to user
                            if (!assetStorageService.isUserAsset(authContext.getUserId(), event.getEntityId())) {
                                throw new AssetProcessingException(INSUFFICIENT_ACCESS);
                            }
                            // Must be writable by restricted client
                            if (!oldAttribute.isAccessRestrictedWrite()) {
                                throw new AssetProcessingException(INSUFFICIENT_ACCESS);
                            }
                        }
                        break;
                    case SENSOR:
                        Optional<AssetAttribute> protocolConfiguration =
                            getAgentLink(oldAttribute).flatMap(agentService::getProtocolConfiguration);
                        // Sensor event must be for an attribute linked to a protocol configuration
                        if (!protocolConfiguration.isPresent()) {
                            throw new AssetProcessingException(INVALID_AGENT_LINK);
                        }
                        break;
                }

                // Either use the timestamp of the event or set event time to processing time
                long processingTime = timerService.getCurrentTimeMillis();
                long eventTime = event.getTimestamp() > 0 ? event.getTimestamp() : processingTime;
                // Reject events with a timestamp in the future; such a source won't be able to update
                // the attribute until after that time (maybe that is desirable behaviour)
                if (eventTime - processingTime > 0) {
                    // TODO: Decide how to handle update events in the future - ignore or change timestamp
                    throw new AssetProcessingException(EVENT_IN_FUTURE, "current time: " + new Date(processingTime) + "/" + processingTime + ", event time: " + new Date(eventTime) + "/" + eventTime);
                }

                // Check the last update timestamp of the attribute, ignoring any event that is older than last update
                // TODO This means we drop out-of-sequence events but accept events with the same source timestamp
                // TODO Several attribute events can occur in the same millisecond, then order of application is undefined
                oldAttribute.getValueTimestamp().filter(t -> t >= 0 && eventTime < t).ifPresent(lastStateTime -> {
                    throw new AssetProcessingException(EVENT_OUTDATED, "last asset state time: " + new Date(lastStateTime) + "/" + lastStateTime + ", event time: " + new Date(eventTime) + "/" + eventTime);
                });

                // Create a copy of the attribute and set the new value and timestamp
                AssetAttribute updatedAttribute = oldAttribute.deepCopy();
                updatedAttribute.setValue(event.getValue().orElse(null), eventTime);

                // Validate constraints of attribute
                List<ValidationFailure> validationFailures = updatedAttribute.getValidationFailures();
                if (!validationFailures.isEmpty()) {
                    throw new AssetProcessingException(ATTRIBUTE_VALIDATION_FAILURE, validationFailures.toString());
                }

                // Push through all processors
                boolean consumedCompletely = processAssetUpdate(em, asset, updatedAttribute, source);

                // Publish a new event for clients if no processor consumed the update completely
                if (!consumedCompletely) {
                    publishClientEvent(asset, updatedAttribute);
                }
            });
        }))
        .endDoTry()
        .doCatch(AssetProcessingException.class)
        .process(handleAssetProcessingException(LOG));
}
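
A minimal sketch of the AttributeExecuteStatus check above, outside the route: a non-sensor write to an executable attribute is accepted unless the value parses to a status that is not writable. Only fromString and isWrite are taken from the code above; the REQUEST_START constant and the behaviour of fromString for unknown names are assumptions about the enum.

import org.openremote.model.attribute.AttributeExecuteStatus;

import java.util.Optional;

public class ExecuteStatusCheckSketch {

    // Mirrors the route's rule: reject only when a status value is present but is not a write status
    static boolean isAcceptableExecuteStatusValue(String rawValue) {
        Optional<AttributeExecuteStatus> status = AttributeExecuteStatus.fromString(rawValue);
        return !status.isPresent() || status.get().isWrite();
    }

    public static void main(String[] args) {
        // Assumption: REQUEST_START is one of the writable statuses a client may send
        System.out.println(isAcceptableExecuteStatusValue(AttributeExecuteStatus.REQUEST_START.name())); // true
        // Assumption: fromString returns an empty Optional for unknown names, so the value passes this check
        System.out.println(isAcceptableExecuteStatusValue("NOT_A_STATUS")); // true
    }
}

In the route itself the raw string comes from event.getValue().flatMap(Values::getString) before the same fromString/isWrite test is applied.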
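The duplicate detection described in the TODO block and enforced by the two timestamp checks (event in future, event outdated) reduces to comparing three timestamps. Below is a standalone sketch of that rule in plain Java; the class and method names are illustrative only, not part of OpenRemote.

// Illustrative only: the accept/reject rule the route applies to event timestamps
public final class EventTimestampRule {

    enum Decision { ACCEPT, REJECT_IN_FUTURE, REJECT_OUTDATED }

    // eventTime: source timestamp of the AttributeEvent (millis)
    // processingTime: server time when the event is processed (millis)
    // lastValueTimestamp: timestamp of the last applied attribute value, or a negative value if none
    static Decision decide(long eventTime, long processingTime, long lastValueTimestamp) {
        if (eventTime > processingTime)
            return Decision.REJECT_IN_FUTURE;   // discard events with source time greater than processing time
        if (lastValueTimestamp >= 0 && eventTime < lastValueTimestamp)
            return Decision.REJECT_OUTDATED;    // discard events older than the last applied state
        return Decision.ACCEPT;                 // same source time is allowed, order of application undefined
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        System.out.println(decide(now, now, now));        // ACCEPT
        System.out.println(decide(now + 5000, now, -1));  // REJECT_IN_FUTURE
        System.out.println(decide(now - 5000, now, now)); // REJECT_OUTDATED
    }
}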