Use of org.apache.nifi.annotation.lifecycle.OnScheduled in the Apache NiFi project:
the ExtractGrok class, method onScheduled.
@OnScheduled
public void onScheduled(final ProcessContext context) throws GrokException {
    // Pre-allocate one reusable read buffer per concurrent task so onTrigger never
    // has to allocate. The MAX_BUFFER_SIZE property cannot change while the
    // processor is scheduled, so read it once instead of once per iteration.
    final int maxBufferSize = context.getProperty(MAX_BUFFER_SIZE).asDataSize(DataUnit.B).intValue();
    for (int i = 0; i < context.getMaxConcurrentTasks(); i++) {
        bufferQueue.add(new byte[maxBufferSize]);
    }

    // Compile the Grok expression once per scheduling cycle, loading the
    // user-supplied pattern file first so the expression can reference its patterns.
    grok = new Grok();
    grok.addPatternFromFile(context.getProperty(GROK_PATTERN_FILE).getValue());
    grok.compile(context.getProperty(GROK_EXPRESSION).getValue(), context.getProperty(NAMED_CAPTURES_ONLY).asBoolean());
}
Use of org.apache.nifi.annotation.lifecycle.OnScheduled in the Apache NiFi project:
the ExtractText class, method onScheduled.
@OnScheduled
public final void onScheduled(final ProcessContext context) throws IOException {
    // Compile every dynamic (user-added) property value into a regex up front so
    // onTrigger only performs map lookups. The compile flags are derived from
    // fixed properties and are loop-invariant — compute them once, not per entry.
    final int flags = getCompileFlags(context);
    final Map<String, Pattern> compiledPatternsMap = new HashMap<>();
    for (final Map.Entry<PropertyDescriptor, String> entry : context.getProperties().entrySet()) {
        if (!entry.getKey().isDynamic()) {
            continue;
        }
        compiledPatternsMap.put(entry.getKey().getName(), Pattern.compile(entry.getValue(), flags));
    }
    compiledPattersMapRef.set(compiledPatternsMap);

    // Pre-allocate one reusable read buffer per concurrent task; the property read
    // is hoisted out of the loop since it cannot change while scheduled.
    final int maxBufferSize = context.getProperty(MAX_BUFFER_SIZE).asDataSize(DataUnit.B).intValue();
    for (int i = 0; i < context.getMaxConcurrentTasks(); i++) {
        bufferQueue.add(new byte[maxBufferSize]);
    }
}
Use of org.apache.nifi.annotation.lifecycle.OnScheduled in the Apache NiFi project:
the GetTwitter class, method onScheduled.
@OnScheduled
public void onScheduled(final ProcessContext context) throws MalformedURLException {
    final String endpointName = context.getProperty(ENDPOINT).getValue();

    // OAuth 1.0a credentials for the Twitter streaming API.
    final Authentication oauth = new OAuth1(context.getProperty(CONSUMER_KEY).getValue(), context.getProperty(CONSUMER_SECRET).getValue(), context.getProperty(ACCESS_TOKEN).getValue(), context.getProperty(ACCESS_TOKEN_SECRET).getValue());

    final ClientBuilder clientBuilder = new ClientBuilder();
    clientBuilder.name("GetTwitter[id=" + getIdentifier() + "]").authentication(oauth).eventMessageQueue(eventQueue).processor(new StringDelimitedProcessor(messageQueue));

    // Optional comma-separated language filter; null means "no language filter".
    final String languageString = context.getProperty(LANGUAGES).getValue();
    final List<String> languages;
    if (languageString == null) {
        languages = null;
    } else {
        languages = new ArrayList<>();
        // Fix: reuse the value already read into languageString instead of
        // fetching the LANGUAGES property a second time.
        for (final String language : languageString.split(",")) {
            languages.add(language.trim());
        }
    }

    // Select and configure the streaming endpoint based on the ENDPOINT property.
    final String host;
    final StreamingEndpoint streamingEndpoint;
    if (ENDPOINT_SAMPLE.getValue().equals(endpointName)) {
        host = Constants.STREAM_HOST;
        final StatusesSampleEndpoint sse = new StatusesSampleEndpoint();
        streamingEndpoint = sse;
        if (languages != null) {
            sse.languages(languages);
        }
    } else if (ENDPOINT_FIREHOSE.getValue().equals(endpointName)) {
        host = Constants.STREAM_HOST;
        final StatusesFirehoseEndpoint firehoseEndpoint = new StatusesFirehoseEndpoint();
        streamingEndpoint = firehoseEndpoint;
        if (languages != null) {
            firehoseEndpoint.languages(languages);
        }
    } else if (ENDPOINT_FILTER.getValue().equals(endpointName)) {
        host = Constants.STREAM_HOST;
        final StatusesFilterEndpoint filterEndpoint = new StatusesFilterEndpoint();

        // Comma-separated list of user IDs to follow; NumberFormatException on a
        // non-numeric entry propagates to the framework (invalid configuration).
        final String followingString = context.getProperty(FOLLOWING).getValue();
        final List<Long> followingIds;
        if (followingString == null) {
            followingIds = Collections.emptyList();
        } else {
            followingIds = new ArrayList<>();
            for (final String split : followingString.split(",")) {
                final Long id = Long.parseLong(split.trim());
                followingIds.add(id);
            }
        }

        // Comma-separated list of terms to track.
        final String termString = context.getProperty(TERMS).getValue();
        final List<String> terms;
        if (termString == null) {
            terms = Collections.emptyList();
        } else {
            terms = new ArrayList<>();
            for (final String split : termString.split(",")) {
                terms.add(split.trim());
            }
        }

        // Only apply filters that were actually configured; the filter endpoint
        // rejects empty criteria lists.
        if (!terms.isEmpty()) {
            filterEndpoint.trackTerms(terms);
        }
        if (!followingIds.isEmpty()) {
            filterEndpoint.followings(followingIds);
        }
        if (languages != null) {
            filterEndpoint.languages(languages);
        }

        // Optional geographic bounding boxes.
        final String locationString = context.getProperty(LOCATIONS).getValue();
        final List<Location> locations;
        if (locationString == null) {
            locations = Collections.emptyList();
        } else {
            locations = LocationUtil.parseLocations(locationString);
        }
        if (!locations.isEmpty()) {
            filterEndpoint.locations(locations);
        }

        streamingEndpoint = filterEndpoint;
    } else {
        // ENDPOINT is validated against an allowable-value set, so reaching here
        // indicates a programming error rather than bad user input.
        throw new AssertionError("Endpoint was invalid value: " + endpointName);
    }

    // Build the client and open the streaming connection immediately.
    clientBuilder.hosts(host).endpoint(streamingEndpoint);
    client = clientBuilder.build();
    client.connect();
}
Use of org.apache.nifi.annotation.lifecycle.OnScheduled in the Apache NiFi project:
the MonitorMemory class, method onConfigured.
@OnScheduled
public void onConfigured(final ConfigurationContext config) throws InitializationException {
    // Which JVM memory pool to watch, and the trigger threshold — either an
    // absolute data size (e.g. "100 MB") or a percentage (e.g. "80%").
    final String desiredMemoryPoolName = config.getProperty(MEMORY_POOL_PROPERTY).getValue();
    final String thresholdValue = config.getProperty(THRESHOLD_PROPERTY).getValue().trim();
    threshold = thresholdValue;

    // Fall back to the task's scheduling period when no explicit reporting interval is set.
    final Long reportingIntervalValue = config.getProperty(REPORTING_INTERVAL).asTimePeriod(TimeUnit.MILLISECONDS);
    reportingIntervalMillis = (reportingIntervalValue == null)
            ? config.getSchedulingPeriod(TimeUnit.MILLISECONDS)
            : reportingIntervalValue;

    // Find the first pool whose name matches and arm its usage threshold.
    for (final MemoryPoolMXBean memoryPoolBean : ManagementFactory.getMemoryPoolMXBeans()) {
        if (!desiredMemoryPoolName.equals(memoryPoolBean.getName())) {
            continue;
        }
        monitoredBean = memoryPoolBean;
        if (memoryPoolBean.isCollectionUsageThresholdSupported()) {
            final long calculatedThreshold;
            if (DATA_SIZE_PATTERN.matcher(thresholdValue).matches()) {
                // Absolute size: parse directly to bytes.
                calculatedThreshold = DataUnit.parseDataSize(thresholdValue, DataUnit.B).longValue();
            } else {
                // Percentage: strip the trailing '%' and scale against the pool's max.
                final String percentage = thresholdValue.substring(0, thresholdValue.length() - 1);
                final double pct = Double.parseDouble(percentage) / 100D;
                calculatedThreshold = (long) (monitoredBean.getUsage().getMax() * pct);
            }
            monitoredBean.setUsageThreshold(calculatedThreshold);
        }
        break;
    }

    if (monitoredBean == null) {
        throw new InitializationException("Found no JVM Memory Pool with name " + desiredMemoryPoolName + "; will not monitor Memory Pool");
    }
}
Use of org.apache.nifi.annotation.lifecycle.OnScheduled in the Apache NiFi project:
the StandardGangliaReporter class, method onConfigure.
@OnScheduled
public void onConfigure(final ConfigurationContext config) throws InitializationException {
    // Build a fresh registry and register one gauge per flow statistic. Each gauge
    // reads the most recent ProcessGroupStatus snapshot from latestStatus and
    // returns zero when no snapshot has been captured yet or the field is null.
    metricsRegistry = new MetricsRegistry();
    // FlowFiles received in the rolling 5-minute status window.
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int32", "FlowFiles Received Last 5 mins"), new Gauge<Integer>() {
        @Override
        public Integer value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0;
            }
            final Integer value = status.getFlowFilesReceived();
            return (value == null) ? 0 : value;
        }
    });
    // Bytes received in the rolling 5-minute status window.
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int64", "Bytes Received Last 5 mins"), new Gauge<Long>() {
        @Override
        public Long value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0L;
            }
            return status.getBytesReceived();
        }
    });
    // FlowFiles sent in the rolling 5-minute status window.
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int32", "FlowFiles Sent Last 5 mins"), new Gauge<Integer>() {
        @Override
        public Integer value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0;
            }
            return status.getFlowFilesSent();
        }
    });
    // Bytes sent in the rolling 5-minute status window.
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int64", "Bytes Sent Last 5 mins"), new Gauge<Long>() {
        @Override
        public Long value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0L;
            }
            return status.getBytesSent();
        }
    });
    // FlowFiles currently queued across the flow (point-in-time, not windowed).
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int32", "FlowFiles Queued"), new Gauge<Integer>() {
        @Override
        public Integer value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0;
            }
            final Integer value = status.getQueuedCount();
            return (value == null) ? 0 : value;
        }
    });
    // Total content size of queued FlowFiles (point-in-time).
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int64", "Bytes Queued"), new Gauge<Long>() {
        @Override
        public Long value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0L;
            }
            final Long value = status.getQueuedContentSize();
            return (value == null) ? 0L : value;
        }
    });
    // Bytes read from the content repository in the last 5 minutes.
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int64", "Bytes Read (5 mins)"), new Gauge<Long>() {
        @Override
        public Long value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0L;
            }
            final Long value = status.getBytesRead();
            return (value == null) ? 0L : value;
        }
    });
    // Bytes written to the content repository in the last 5 minutes.
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int64", "Bytes Written (5 mins)"), new Gauge<Long>() {
        @Override
        public Long value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0L;
            }
            final Long value = status.getBytesWritten();
            return (value == null) ? 0L : value;
        }
    });
    // Threads actively executing processors at the time of the snapshot.
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int32", "Active Threads"), new Gauge<Integer>() {
        @Override
        public Integer value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0;
            }
            final Integer value = status.getActiveThreadCount();
            return (value == null) ? 0 : value;
        }
    });
    // Aggregate processing time, converted from nanoseconds (via the
    // calculateProcessingNanos helper) to whole seconds.
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int32", "Total Task Duration Seconds"), new Gauge<Integer>() {
        @Override
        public Integer value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0;
            }
            final long nanos = calculateProcessingNanos(status);
            return (int) TimeUnit.NANOSECONDS.toSeconds(nanos);
        }
    });

    // Ganglia endpoint taken from the service configuration.
    final String gangliaHost = config.getProperty(HOSTNAME).getValue();
    final int port = config.getProperty(PORT).asInteger();
    try {
        // sanitizeName is overridden to return the metric name verbatim, bypassing
        // the base reporter's name sanitization.
        gangliaReporter = new GangliaReporter(metricsRegistry, gangliaHost, port, METRICS_GROUP) {
            @Override
            protected String sanitizeName(MetricName name) {
                return name.getName();
            }
        };
        gangliaReporter.printVMMetrics = config.getProperty(SEND_JVM_METRICS).asBoolean();
    } catch (final IOException e) {
        // Reporter construction opens the Ganglia connection; surface failures as
        // an initialization error so the framework reports the task as invalid.
        throw new InitializationException(e);
    }
}
Aggregations