Use of org.jboss.netty.util.TimerTask in project traccar by traccar.
The class ConnectionManager, method updateDevice.
public void updateDevice(final long deviceId, String status, Date time) {
    Device device = Context.getIdentityManager().getById(deviceId);
    if (device == null) {
        return;
    }

    String oldStatus = device.getStatus();
    device.setStatus(status);

    // Raise a status event only when the status actually changed.
    if (enableStatusEvents && !status.equals(oldStatus)) {
        String eventType;
        Map<Event, Position> events = new HashMap<>();
        switch (status) {
            case Device.STATUS_ONLINE:
                eventType = Event.TYPE_DEVICE_ONLINE;
                break;
            case Device.STATUS_UNKNOWN:
                eventType = Event.TYPE_DEVICE_UNKNOWN;
                if (updateDeviceState) {
                    events.putAll(updateDeviceState(deviceId));
                }
                break;
            default:
                eventType = Event.TYPE_DEVICE_OFFLINE;
                if (updateDeviceState) {
                    events.putAll(updateDeviceState(deviceId));
                }
                break;
        }
        events.put(new Event(eventType, deviceId), null);
        Context.getNotificationManager().updateEvents(events);
    }

    // Cancel any timeout scheduled by a previous update of this device.
    Timeout timeout = timeouts.remove(deviceId);
    if (timeout != null) {
        timeout.cancel();
    }

    if (time != null) {
        device.setLastUpdate(time);
    }

    // While the device is online, schedule a task that flips it to "unknown"
    // if no further update arrives within deviceTimeout milliseconds.
    if (status.equals(Device.STATUS_ONLINE)) {
        timeouts.put(deviceId, GlobalTimer.getTimer().newTimeout(new TimerTask() {
            @Override
            public void run(Timeout timeout) throws Exception {
                if (!timeout.isCancelled()) {
                    updateDevice(deviceId, Device.STATUS_UNKNOWN, null);
                    activeDevices.remove(deviceId);
                }
            }
        }, deviceTimeout, TimeUnit.MILLISECONDS));
    }

    try {
        Context.getDeviceManager().updateDeviceStatus(device);
    } catch (SQLException error) {
        Log.warning(error);
    }

    updateDevice(device);
}
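The traccar method above follows the standard Netty 3 timer idiom: obtain a shared Timer (a HashedWheelTimer), schedule a TimerTask with newTimeout(), keep the returned Timeout so it can be cancelled later, and re-check isCancelled() inside the task before acting. Below is a minimal, self-contained sketch of that idiom; the TimeoutSketch class, the schedule()/expire() methods, and the locally created HashedWheelTimer are illustrative stand-ins (traccar, for example, uses a shared GlobalTimer instead), not code from either project.

import java.util.concurrent.TimeUnit;

import org.jboss.netty.util.HashedWheelTimer;
import org.jboss.netty.util.Timeout;
import org.jboss.netty.util.Timer;
import org.jboss.netty.util.TimerTask;

public class TimeoutSketch {

    // A shared wheel timer; in traccar this role is played by GlobalTimer.getTimer().
    private final Timer timer = new HashedWheelTimer();

    // The currently pending expiration, if any.
    private Timeout pending;

    // Schedule (or reschedule) an expiration callback after the given delay.
    public void schedule(long delayMillis) {
        if (pending != null) {
            pending.cancel(); // drop any previously scheduled task
        }
        pending = timer.newTimeout(new TimerTask() {
            @Override
            public void run(Timeout timeout) throws Exception {
                if (!timeout.isCancelled()) {
                    expire(); // only fire if nothing cancelled us in the meantime
                }
            }
        }, delayMillis, TimeUnit.MILLISECONDS);
    }

    // Placeholder for whatever should happen when the delay elapses
    // (e.g. marking a device as unknown, as in the traccar example above).
    private void expire() {
        System.out.println("timeout expired");
    }
}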
Use of org.jboss.netty.util.TimerTask in project opentsdb by OpenTSDB.
The class PutDataPointRpc, method processDataPoint.
/**
 * Handles one or more incoming data point types for the HTTP endpoint
 * to put raw, rolled up or aggregated data points
 * @param <T> An {@link IncomingDataPoint} class.
 * @param tsdb The TSDB to which we belong
 * @param query The query to respond to
 * @param dps The de-serialized data points
 * @throws BadRequestException if the data is invalid in some way
 * @since 2.4
 */
public <T extends IncomingDataPoint> void processDataPoint(final TSDB tsdb,
    final HttpQuery query, final List<T> dps) {
  if (dps.size() < 1) {
    throw new BadRequestException("No datapoints found in content");
  }

  final HashMap<String, String> query_tags = new HashMap<String, String>();
  final boolean show_details = query.hasQueryStringParam("details");
  final boolean show_summary = query.hasQueryStringParam("summary");
  final boolean synchronous = query.hasQueryStringParam("sync");
  final int sync_timeout = query.hasQueryStringParam("sync_timeout")
      ? Integer.parseInt(query.getQueryStringParam("sync_timeout")) : 0;
  // this is used to coordinate timeouts
  final AtomicBoolean sending_response = new AtomicBoolean();
  sending_response.set(false);

  final List<Map<String, Object>> details = show_details
      ? new ArrayList<Map<String, Object>>() : null;
  int queued = 0;
  final List<Deferred<Boolean>> deferreds = synchronous
      ? new ArrayList<Deferred<Boolean>>(dps.size()) : null;

  if (tsdb.getConfig().enable_header_tag()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Looking for tag header " + tsdb.getConfig().get_name_header_tag());
    }
    final String header_tag_value =
        query.getHeaderValue(tsdb.getConfig().get_name_header_tag());
    if (header_tag_value != null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug(" header found with value:" + header_tag_value);
      }
      Tags.parse(query_tags, header_tag_value);
    } else if (LOG.isDebugEnabled()) {
      LOG.debug(" no such header in request");
    }
  }
  for (final IncomingDataPoint dp : dps) {
    final DataPointType type;
    if (dp instanceof RollUpDataPoint) {
      type = DataPointType.ROLLUP;
      rollup_dps.incrementAndGet();
    } else if (dp instanceof HistogramPojo) {
      type = DataPointType.HISTOGRAM;
      raw_histograms.incrementAndGet();
    } else {
      type = DataPointType.PUT;
      raw_dps.incrementAndGet();
    }

    /*
     * Error back callback to handle storage failures
     */
    final class PutErrback implements Callback<Boolean, Exception> {
      public Boolean call(final Exception arg) {
        if (arg instanceof PleaseThrottleException) {
          inflight_exceeded.incrementAndGet();
        } else {
          hbase_errors.incrementAndGet();
        }
        if (show_details) {
          details.add(getHttpDetails("Storage exception: " + arg.getMessage(), dp));
        }
        // we handle the storage exceptions here so as to avoid creating yet
        // another callback object on every data point.
        handleStorageException(tsdb, dp, arg);
        return false;
      }

      public String toString() {
        return "HTTP Put exception";
      }
    }

    final class SuccessCB implements Callback<Boolean, Object> {
      @Override
      public Boolean call(final Object obj) {
        switch (type) {
          case PUT:
            raw_stored.incrementAndGet();
            break;
          case ROLLUP:
            rollup_stored.incrementAndGet();
            break;
          case HISTOGRAM:
            raw_histograms_stored.incrementAndGet();
            break;
          default:
        }
        return true;
      }
    }
    try {
      if (dp == null) {
        if (show_details) {
          details.add(this.getHttpDetails("Unexpected null datapoint encountered in set.", dp));
        }
        LOG.warn("Datapoint null was encountered in set.");
        illegal_arguments.incrementAndGet();
        continue;
      }

      if (!dp.validate(details)) {
        illegal_arguments.incrementAndGet();
        continue;
      }

      // TODO - refactor the add calls someday or move some of this into the
      // actual data point class.
      final Deferred<Boolean> deferred;
      if (type == DataPointType.HISTOGRAM) {
        final HistogramPojo pojo = (HistogramPojo) dp;
        // validation and/or conversion before storage of histograms by
        // decoding then re-encoding.
        final Histogram hdp;
        if (Strings.isNullOrEmpty(dp.getValue())) {
          hdp = pojo.toSimpleHistogram(tsdb);
        } else {
          hdp = tsdb.histogramManager().decode(pojo.getId(), pojo.getBytes(), false);
        }
        deferred = tsdb.addHistogramPoint(pojo.getMetric(), pojo.getTimestamp(),
            tsdb.histogramManager().encode(hdp.getId(), hdp, true), pojo.getTags())
            .addCallback(new SuccessCB())
            .addErrback(new PutErrback());
      } else {
        if (Tags.looksLikeInteger(dp.getValue())) {
          switch (type) {
            case ROLLUP: {
              final RollUpDataPoint rdp = (RollUpDataPoint) dp;
              deferred = tsdb.addAggregatePoint(rdp.getMetric(), rdp.getTimestamp(),
                  Tags.parseLong(rdp.getValue()), dp.getTags(),
                  rdp.getGroupByAggregator() != null, rdp.getInterval(),
                  rdp.getAggregator(), rdp.getGroupByAggregator())
                  .addCallback(new SuccessCB())
                  .addErrback(new PutErrback());
              break;
            }
            default:
              deferred = tsdb.addPoint(dp.getMetric(), dp.getTimestamp(),
                  Tags.parseLong(dp.getValue()), dp.getTags())
                  .addCallback(new SuccessCB())
                  .addErrback(new PutErrback());
          }
        } else {
          switch (type) {
            case ROLLUP: {
              final RollUpDataPoint rdp = (RollUpDataPoint) dp;
              deferred = tsdb.addAggregatePoint(rdp.getMetric(), rdp.getTimestamp(),
                  (Tags.fitsInFloat(dp.getValue())
                      ? Float.parseFloat(dp.getValue())
                      : Double.parseDouble(dp.getValue())),
                  dp.getTags(), rdp.getGroupByAggregator() != null, rdp.getInterval(),
                  rdp.getAggregator(), rdp.getGroupByAggregator())
                  .addCallback(new SuccessCB())
                  .addErrback(new PutErrback());
              break;
            }
            default:
              deferred = tsdb.addPoint(dp.getMetric(), dp.getTimestamp(),
                  (Tags.fitsInFloat(dp.getValue())
                      ? Float.parseFloat(dp.getValue())
                      : Double.parseDouble(dp.getValue())),
                  dp.getTags())
                  .addCallback(new SuccessCB())
                  .addErrback(new PutErrback());
          }
        }
      }
      ++queued;
      if (synchronous) {
        deferreds.add(deferred);
      }
    } catch (NumberFormatException x) {
      if (show_details) {
        details.add(getHttpDetails("Unable to parse value to a number", dp));
      }
      LOG.warn("Unable to parse value to a number: " + dp);
      invalid_values.incrementAndGet();
    } catch (IllegalArgumentException iae) {
      if (show_details) {
        details.add(getHttpDetails(iae.getMessage(), dp));
      }
      LOG.warn(iae.getMessage() + ": " + dp);
      illegal_arguments.incrementAndGet();
    } catch (NoSuchUniqueName nsu) {
      if (show_details) {
        details.add(getHttpDetails("Unknown metric", dp));
      }
      LOG.warn("Unknown metric: " + dp);
      unknown_metrics.incrementAndGet();
    } catch (PleaseThrottleException x) {
      handleStorageException(tsdb, dp, x);
      if (show_details) {
        details.add(getHttpDetails("Please throttle", dp));
      }
      inflight_exceeded.incrementAndGet();
    } catch (TimeoutException tex) {
      handleStorageException(tsdb, dp, tex);
      if (show_details) {
        details.add(getHttpDetails("Timeout exception", dp));
      }
      requests_timedout.incrementAndGet();
    /*} catch (NoSuchUniqueNameInCache x) {
      handleStorageException(tsdb, dp, x);
      if (show_details) {
        details.add(getHttpDetails("Not cached yet", dp));
      } */
    } catch (RuntimeException e) {
      if (show_details) {
        details.add(getHttpDetails("Unexpected exception", dp));
      }
      LOG.warn("Unexpected exception: " + dp, e);
      unknown_errors.incrementAndGet();
    }
  }
  /**
   * A timer task that will respond to the user with the number of timeouts
   * for synchronous writes.
   */
  class PutTimeout implements TimerTask {
    final int queued;

    public PutTimeout(final int queued) {
      this.queued = queued;
    }

    @Override
    public void run(final Timeout timeout) throws Exception {
      if (sending_response.get()) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Put data point call " + query + " already responded successfully");
        }
        return;
      } else {
        sending_response.set(true);
      }

      // figure out how many writes are outstanding
      int good_writes = 0;
      int failed_writes = 0;
      int timeouts = 0;
      for (int i = 0; i < deferreds.size(); i++) {
        try {
          if (deferreds.get(i).join(1)) {
            ++good_writes;
          } else {
            ++failed_writes;
          }
        } catch (TimeoutException te) {
          if (show_details) {
            details.add(getHttpDetails("Write timedout", dps.get(i)));
          }
          ++timeouts;
        }
      }
      writes_timedout.addAndGet(timeouts);

      final int failures = dps.size() - queued;
      if (!show_summary && !show_details) {
        query.sendReply(HttpResponseStatus.BAD_REQUEST,
            query.serializer().formatErrorV1(new BadRequestException(
                HttpResponseStatus.BAD_REQUEST,
                "The put call has timedout with " + good_writes + " successful writes, "
                    + failed_writes + " failed writes and " + timeouts + " timed out writes.",
                "Please see the TSD logs or append \"details\" to the put request")));
      } else {
        final HashMap<String, Object> summary = new HashMap<String, Object>();
        summary.put("success", good_writes);
        summary.put("failed", failures + failed_writes);
        summary.put("timeouts", timeouts);
        if (show_details) {
          summary.put("errors", details);
        }
        query.sendReply(HttpResponseStatus.BAD_REQUEST, query.serializer().formatPutV1(summary));
      }
    }
  }

  // now after everything has been sent we can schedule a timeout
  // if the caller asked for a synchronous write.
  final Timeout timeout = sync_timeout > 0
      ? tsdb.getTimer().newTimeout(new PutTimeout(queued), sync_timeout, TimeUnit.MILLISECONDS)
      : null;
  /**
   * Serializes the response to the client
   */
  class GroupCB implements Callback<Object, ArrayList<Boolean>> {
    final int queued;

    public GroupCB(final int queued) {
      this.queued = queued;
    }

    @Override
    public Object call(final ArrayList<Boolean> results) {
      if (sending_response.get()) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Put data point call " + query + " was marked as timedout");
        }
        return null;
      } else {
        sending_response.set(true);
        if (timeout != null) {
          timeout.cancel();
        }
      }

      int good_writes = 0;
      int failed_writes = 0;
      for (final boolean result : results) {
        if (result) {
          ++good_writes;
        } else {
          ++failed_writes;
        }
      }

      final int failures = dps.size() - queued;
      if (!show_summary && !show_details) {
        if (failures + failed_writes > 0) {
          query.sendReply(HttpResponseStatus.BAD_REQUEST,
              query.serializer().formatErrorV1(new BadRequestException(
                  HttpResponseStatus.BAD_REQUEST,
                  "One or more data points had errors",
                  "Please see the TSD logs or append \"details\" to the put request")));
        } else {
          query.sendReply(HttpResponseStatus.NO_CONTENT, "".getBytes());
        }
      } else {
        final HashMap<String, Object> summary = new HashMap<String, Object>();
        if (sync_timeout > 0) {
          summary.put("timeouts", 0);
        }
        summary.put("success", results.isEmpty() ? queued : good_writes);
        summary.put("failed", failures + failed_writes);
        if (show_details) {
          summary.put("errors", details);
        }
        if (failures > 0) {
          query.sendReply(HttpResponseStatus.BAD_REQUEST, query.serializer().formatPutV1(summary));
        } else {
          query.sendReply(query.serializer().formatPutV1(summary));
        }
      }
      return null;
    }

    @Override
    public String toString() {
      return "put data point serialization callback";
    }
  }
  /**
   * Catches any unexpected exceptions thrown in the callback chain
   */
  class ErrCB implements Callback<Object, Exception> {
    @Override
    public Object call(final Exception e) throws Exception {
      if (sending_response.get()) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("ERROR point call " + query + " was marked as timedout", e);
        }
        return null;
      } else {
        sending_response.set(true);
        if (timeout != null) {
          timeout.cancel();
        }
      }
      LOG.error("Unexpected exception", e);
      throw new RuntimeException("Unexpected exception", e);
    }

    @Override
    public String toString() {
      return "put data point error callback";
    }
  }

  if (synchronous) {
    Deferred.groupInOrder(deferreds)
        .addCallback(new GroupCB(queued))
        .addErrback(new ErrCB());
  } else {
    new GroupCB(queued).call(EMPTY_DEFERREDS);
  }
}
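The synchronous path above hinges on a small coordination idiom: the PutTimeout timer task and the GroupCB/ErrCB completion callbacks share the sending_response flag, so whichever side runs first sends the HTTP reply while the other becomes a no-op, and the completing side also cancels the Timeout it no longer needs. Below is a stripped-down, hypothetical sketch of that idiom with no TSDB dependencies; the Responder class and its respond()/onComplete() hooks are invented for illustration, and it uses compareAndSet() where the code above uses a separate get()/set() pair.

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

import org.jboss.netty.util.HashedWheelTimer;
import org.jboss.netty.util.Timeout;
import org.jboss.netty.util.Timer;
import org.jboss.netty.util.TimerTask;

public class Responder {

    private final Timer timer = new HashedWheelTimer();

    // Flipped to true by whichever side (timeout or completion) replies first.
    private final AtomicBoolean responded = new AtomicBoolean(false);

    public void start(long timeoutMillis) {
        // Timer side: reply with a timeout error unless completion already won the race.
        final Timeout timeout = timer.newTimeout(new TimerTask() {
            @Override
            public void run(Timeout t) throws Exception {
                if (responded.compareAndSet(false, true)) {
                    respond("timed out");
                }
            }
        }, timeoutMillis, TimeUnit.MILLISECONDS);

        // Completion side, e.g. the callback at the end of an async write chain.
        onComplete(new Runnable() {
            @Override
            public void run() {
                if (responded.compareAndSet(false, true)) {
                    timeout.cancel(); // the timer task is no longer needed
                    respond("done");
                }
            }
        });
    }

    // Hypothetical stand-in for query.sendReply(...).
    private void respond(String message) {
        System.out.println(message);
    }

    // Hypothetical stand-in for registering a completion callback; here it runs immediately.
    private void onComplete(Runnable action) {
        action.run();
    }
}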