
Example 21 with OnScheduled

Use of org.apache.nifi.annotation.lifecycle.OnScheduled in project nifi by apache.

The class ExtractGrok, method onScheduled.

@OnScheduled
public void onScheduled(final ProcessContext context) throws GrokException {
    for (int i = 0; i < context.getMaxConcurrentTasks(); i++) {
        final int maxBufferSize = context.getProperty(MAX_BUFFER_SIZE).asDataSize(DataUnit.B).intValue();
        final byte[] buffer = new byte[maxBufferSize];
        bufferQueue.add(buffer);
    }
    grok = new Grok();
    grok.addPatternFromFile(context.getProperty(GROK_PATTERN_FILE).getValue());
    grok.compile(context.getProperty(GROK_EXPRESSION).getValue(), context.getProperty(NAMED_CAPTURES_ONLY).asBoolean());
}
Also used: Grok (io.thekraken.grok.api.Grok), OnScheduled (org.apache.nifi.annotation.lifecycle.OnScheduled)
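
As a hedged illustration (not part of the NiFi source), the buffer pooling and Grok compilation above are normally driven by the framework when the processor is scheduled; with the nifi-mock TestRunner, runner.run() invokes @OnScheduled methods before onTrigger. The pattern-file path below is a placeholder, and the sketch assumes the property descriptors are exposed as public constants, per the usual NiFi convention.

import org.apache.nifi.processors.standard.ExtractGrok;
import org.apache.nifi.util.TestRunner;
import org.apache.nifi.util.TestRunners;

public class ExtractGrokScheduleSketch {
    public static void main(String[] args) {
        final TestRunner runner = TestRunners.newTestRunner(ExtractGrok.class);
        // Must point at an existing Grok patterns file; the path here is illustrative only.
        runner.setProperty(ExtractGrok.GROK_PATTERN_FILE, "src/test/resources/patterns");
        runner.setProperty(ExtractGrok.GROK_EXPRESSION, "%{GREEDYDATA:message}");
        runner.enqueue("hello world".getBytes());
        // run() calls the @OnScheduled method above, then onTrigger() once.
        runner.run();
    }
}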

Example 22 with OnScheduled

Use of org.apache.nifi.annotation.lifecycle.OnScheduled in project nifi by apache.

The class ExtractText, method onScheduled.

@OnScheduled
public final void onScheduled(final ProcessContext context) throws IOException {
    final Map<String, Pattern> compiledPatternsMap = new HashMap<>();
    for (final Map.Entry<PropertyDescriptor, String> entry : context.getProperties().entrySet()) {
        if (!entry.getKey().isDynamic()) {
            continue;
        }
        final int flags = getCompileFlags(context);
        final Pattern pattern = Pattern.compile(entry.getValue(), flags);
        compiledPatternsMap.put(entry.getKey().getName(), pattern);
    }
    compiledPattersMapRef.set(compiledPatternsMap);
    for (int i = 0; i < context.getMaxConcurrentTasks(); i++) {
        final int maxBufferSize = context.getProperty(MAX_BUFFER_SIZE).asDataSize(DataUnit.B).intValue();
        final byte[] buffer = new byte[maxBufferSize];
        bufferQueue.add(buffer);
    }
}
Also used: Pattern (java.util.regex.Pattern), PropertyDescriptor (org.apache.nifi.components.PropertyDescriptor), HashMap (java.util.HashMap), Map (java.util.Map), OnScheduled (org.apache.nifi.annotation.lifecycle.OnScheduled)
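
A hedged sketch (not from the NiFi source) of how the dynamic-property loop above is driven: every dynamic property name becomes a key in compiledPatternsMap, and its value must be a regular expression with a capture group. The property name "order.id" is illustrative only.

import org.apache.nifi.processors.standard.ExtractText;
import org.apache.nifi.util.TestRunner;
import org.apache.nifi.util.TestRunners;

public class ExtractTextScheduleSketch {
    public static void main(String[] args) {
        final TestRunner runner = TestRunners.newTestRunner(ExtractText.class);
        // Dynamic property: compiled to a Pattern by onScheduled(), applied by onTrigger().
        runner.setProperty("order.id", "order=(\\d+)");
        runner.enqueue("order=12345".getBytes());
        runner.run();
    }
}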

Example 23 with OnScheduled

Use of org.apache.nifi.annotation.lifecycle.OnScheduled in project nifi by apache.

The class GetTwitter, method onScheduled.

@OnScheduled
public void onScheduled(final ProcessContext context) throws MalformedURLException {
    final String endpointName = context.getProperty(ENDPOINT).getValue();
    final Authentication oauth = new OAuth1(context.getProperty(CONSUMER_KEY).getValue(), context.getProperty(CONSUMER_SECRET).getValue(), context.getProperty(ACCESS_TOKEN).getValue(), context.getProperty(ACCESS_TOKEN_SECRET).getValue());
    final ClientBuilder clientBuilder = new ClientBuilder();
    clientBuilder.name("GetTwitter[id=" + getIdentifier() + "]").authentication(oauth).eventMessageQueue(eventQueue).processor(new StringDelimitedProcessor(messageQueue));
    final String languageString = context.getProperty(LANGUAGES).getValue();
    final List<String> languages;
    if (languageString == null) {
        languages = null;
    } else {
        languages = new ArrayList<>();
        for (final String language : context.getProperty(LANGUAGES).getValue().split(",")) {
            languages.add(language.trim());
        }
    }
    final String host;
    final StreamingEndpoint streamingEndpoint;
    if (ENDPOINT_SAMPLE.getValue().equals(endpointName)) {
        host = Constants.STREAM_HOST;
        final StatusesSampleEndpoint sse = new StatusesSampleEndpoint();
        streamingEndpoint = sse;
        if (languages != null) {
            sse.languages(languages);
        }
    } else if (ENDPOINT_FIREHOSE.getValue().equals(endpointName)) {
        host = Constants.STREAM_HOST;
        final StatusesFirehoseEndpoint firehoseEndpoint = new StatusesFirehoseEndpoint();
        streamingEndpoint = firehoseEndpoint;
        if (languages != null) {
            firehoseEndpoint.languages(languages);
        }
    } else if (ENDPOINT_FILTER.getValue().equals(endpointName)) {
        host = Constants.STREAM_HOST;
        final StatusesFilterEndpoint filterEndpoint = new StatusesFilterEndpoint();
        final String followingString = context.getProperty(FOLLOWING).getValue();
        final List<Long> followingIds;
        if (followingString == null) {
            followingIds = Collections.emptyList();
        } else {
            followingIds = new ArrayList<>();
            for (final String split : followingString.split(",")) {
                final Long id = Long.parseLong(split.trim());
                followingIds.add(id);
            }
        }
        final String termString = context.getProperty(TERMS).getValue();
        final List<String> terms;
        if (termString == null) {
            terms = Collections.emptyList();
        } else {
            terms = new ArrayList<>();
            for (final String split : termString.split(",")) {
                terms.add(split.trim());
            }
        }
        if (!terms.isEmpty()) {
            filterEndpoint.trackTerms(terms);
        }
        if (!followingIds.isEmpty()) {
            filterEndpoint.followings(followingIds);
        }
        if (languages != null) {
            filterEndpoint.languages(languages);
        }
        final String locationString = context.getProperty(LOCATIONS).getValue();
        final List<Location> locations;
        if (locationString == null) {
            locations = Collections.emptyList();
        } else {
            locations = LocationUtil.parseLocations(locationString);
        }
        if (!locations.isEmpty()) {
            filterEndpoint.locations(locations);
        }
        streamingEndpoint = filterEndpoint;
    } else {
        throw new AssertionError("Endpoint was invalid value: " + endpointName);
    }
    clientBuilder.hosts(host).endpoint(streamingEndpoint);
    client = clientBuilder.build();
    client.connect();
}
Also used: OAuth1 (com.twitter.hbc.httpclient.auth.OAuth1), StreamingEndpoint (com.twitter.hbc.core.endpoint.StreamingEndpoint), Authentication (com.twitter.hbc.httpclient.auth.Authentication), StatusesSampleEndpoint (com.twitter.hbc.core.endpoint.StatusesSampleEndpoint), StatusesFirehoseEndpoint (com.twitter.hbc.core.endpoint.StatusesFirehoseEndpoint), StringDelimitedProcessor (com.twitter.hbc.core.processor.StringDelimitedProcessor), StatusesFilterEndpoint (com.twitter.hbc.core.endpoint.StatusesFilterEndpoint), ClientBuilder (com.twitter.hbc.ClientBuilder), Location (com.twitter.hbc.core.endpoint.Location), OnScheduled (org.apache.nifi.annotation.lifecycle.OnScheduled)
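
Because onScheduled() opens a live streaming connection, a matching teardown is needed when the processor is unscheduled. The following is a minimal sketch of such a companion hook, assuming the org.apache.nifi.annotation.lifecycle.OnStopped annotation and the hbc client's stop() method; it is written against the client field used in the snippet above, not quoted from the source.

@OnStopped
public void shutdownClient() {
    // Release the Twitter stream connection opened in onScheduled().
    if (client != null) {
        client.stop();
    }
}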

Example 24 with OnScheduled

Use of org.apache.nifi.annotation.lifecycle.OnScheduled in project nifi by apache.

The class MonitorMemory, method onConfigured.

@OnScheduled
public void onConfigured(final ConfigurationContext config) throws InitializationException {
    final String desiredMemoryPoolName = config.getProperty(MEMORY_POOL_PROPERTY).getValue();
    final String thresholdValue = config.getProperty(THRESHOLD_PROPERTY).getValue().trim();
    threshold = thresholdValue;
    final Long reportingIntervalValue = config.getProperty(REPORTING_INTERVAL).asTimePeriod(TimeUnit.MILLISECONDS);
    if (reportingIntervalValue == null) {
        reportingIntervalMillis = config.getSchedulingPeriod(TimeUnit.MILLISECONDS);
    } else {
        reportingIntervalMillis = reportingIntervalValue;
    }
    final List<MemoryPoolMXBean> memoryPoolBeans = ManagementFactory.getMemoryPoolMXBeans();
    for (int i = 0; i < memoryPoolBeans.size() && monitoredBean == null; i++) {
        MemoryPoolMXBean memoryPoolBean = memoryPoolBeans.get(i);
        String memoryPoolName = memoryPoolBean.getName();
        if (desiredMemoryPoolName.equals(memoryPoolName)) {
            monitoredBean = memoryPoolBean;
            if (memoryPoolBean.isCollectionUsageThresholdSupported()) {
                long calculatedThreshold;
                if (DATA_SIZE_PATTERN.matcher(thresholdValue).matches()) {
                    calculatedThreshold = DataUnit.parseDataSize(thresholdValue, DataUnit.B).longValue();
                } else {
                    final String percentage = thresholdValue.substring(0, thresholdValue.length() - 1);
                    final double pct = Double.parseDouble(percentage) / 100D;
                    calculatedThreshold = (long) (monitoredBean.getUsage().getMax() * pct);
                }
                monitoredBean.setUsageThreshold(calculatedThreshold);
            }
        }
    }
    if (monitoredBean == null) {
        throw new InitializationException("Found no JVM Memory Pool with name " + desiredMemoryPoolName + "; will not monitor Memory Pool");
    }
}
Also used: MemoryPoolMXBean (java.lang.management.MemoryPoolMXBean), InitializationException (org.apache.nifi.reporting.InitializationException), OnScheduled (org.apache.nifi.annotation.lifecycle.OnScheduled)
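
The threshold branch above accepts either an absolute data size ("64 MB") or a percentage of the pool's maximum ("80%"). The following standalone sketch reproduces that logic with NiFi's DataUnit.parseDataSize; the DATA_SIZE_PATTERN and the 1 GiB pool maximum are illustrative stand-ins, not values taken from MonitorMemory.

import java.util.regex.Pattern;
import org.apache.nifi.processor.DataUnit;

public class ThresholdSketch {
    // Illustrative pattern for "<number> <unit>" values; MonitorMemory defines its own.
    private static final Pattern DATA_SIZE_PATTERN =
            Pattern.compile("\\d+\\s*(B|KB|MB|GB|TB)", Pattern.CASE_INSENSITIVE);

    static long calculateThreshold(final String thresholdValue, final long poolMaxBytes) {
        if (DATA_SIZE_PATTERN.matcher(thresholdValue.trim()).matches()) {
            // Absolute size, e.g. "64 MB" -> 67108864 bytes.
            return DataUnit.parseDataSize(thresholdValue.trim(), DataUnit.B).longValue();
        }
        // Percentage of the pool's maximum, e.g. "80%" of poolMaxBytes.
        final String percentage = thresholdValue.trim().replace("%", "");
        return (long) (poolMaxBytes * (Double.parseDouble(percentage) / 100D));
    }

    public static void main(String[] args) {
        final long oneGiB = 1024L * 1024 * 1024; // stand-in for monitoredBean.getUsage().getMax()
        System.out.println(calculateThreshold("64 MB", oneGiB)); // 67108864
        System.out.println(calculateThreshold("80%", oneGiB));   // 858993459
    }
}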

Example 25 with OnScheduled

Use of org.apache.nifi.annotation.lifecycle.OnScheduled in project nifi by apache.

The class StandardGangliaReporter, method onConfigure.

@OnScheduled
public void onConfigure(final ConfigurationContext config) throws InitializationException {
    metricsRegistry = new MetricsRegistry();
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int32", "FlowFiles Received Last 5 mins"), new Gauge<Integer>() {

        @Override
        public Integer value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0;
            }
            final Integer value = status.getFlowFilesReceived();
            return (value == null) ? 0 : value;
        }
    });
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int64", "Bytes Received Last 5 mins"), new Gauge<Long>() {

        @Override
        public Long value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0L;
            }
            return status.getBytesReceived();
        }
    });
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int32", "FlowFiles Sent Last 5 mins"), new Gauge<Integer>() {

        @Override
        public Integer value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0;
            }
            return status.getFlowFilesSent();
        }
    });
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int64", "Bytes Sent Last 5 mins"), new Gauge<Long>() {

        @Override
        public Long value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0L;
            }
            return status.getBytesSent();
        }
    });
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int32", "FlowFiles Queued"), new Gauge<Integer>() {

        @Override
        public Integer value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0;
            }
            final Integer value = status.getQueuedCount();
            return (value == null) ? 0 : value;
        }
    });
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int64", "Bytes Queued"), new Gauge<Long>() {

        @Override
        public Long value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0L;
            }
            final Long value = status.getQueuedContentSize();
            return (value == null) ? 0L : value;
        }
    });
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int64", "Bytes Read (5 mins)"), new Gauge<Long>() {

        @Override
        public Long value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0L;
            }
            final Long value = status.getBytesRead();
            return (value == null) ? 0L : value;
        }
    });
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int64", "Bytes Written (5 mins)"), new Gauge<Long>() {

        @Override
        public Long value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0L;
            }
            final Long value = status.getBytesWritten();
            return (value == null) ? 0L : value;
        }
    });
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int32", "Active Threads"), new Gauge<Integer>() {

        @Override
        public Integer value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0;
            }
            final Integer value = status.getActiveThreadCount();
            return (value == null) ? 0 : value;
        }
    });
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, "int32", "Total Task Duration Seconds"), new Gauge<Integer>() {

        @Override
        public Integer value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return 0;
            }
            final long nanos = calculateProcessingNanos(status);
            return (int) TimeUnit.NANOSECONDS.toSeconds(nanos);
        }
    });
    final String gangliaHost = config.getProperty(HOSTNAME).getValue();
    final int port = config.getProperty(PORT).asInteger();
    try {
        gangliaReporter = new GangliaReporter(metricsRegistry, gangliaHost, port, METRICS_GROUP) {

            @Override
            protected String sanitizeName(MetricName name) {
                return name.getName();
            }
        };
        gangliaReporter.printVMMetrics = config.getProperty(SEND_JVM_METRICS).asBoolean();
    } catch (final IOException e) {
        throw new InitializationException(e);
    }
}
Also used: ProcessGroupStatus (org.apache.nifi.controller.status.ProcessGroupStatus), MetricsRegistry (com.yammer.metrics.core.MetricsRegistry), GangliaReporter (com.yammer.metrics.reporting.GangliaReporter), IOException (java.io.IOException), InitializationException (org.apache.nifi.reporting.InitializationException), MetricName (com.yammer.metrics.core.MetricName), OnScheduled (org.apache.nifi.annotation.lifecycle.OnScheduled)
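
The nine gauge registrations above all follow the same null-safe pattern, differing only in metric type, name, and the ProcessGroupStatus getter. A hypothetical helper like the one below (not part of the NiFi source, and assuming the same Yammer Metrics 2.x API plus the latestStatus and METRICS_GROUP members of the surrounding reporting task) could collapse the repetition:

// Hypothetical helper; the first two registrations above would then read:
//   newGroupStatusGauge("int32", "FlowFiles Received Last 5 mins", 0, ProcessGroupStatus::getFlowFilesReceived);
//   newGroupStatusGauge("int64", "Bytes Received Last 5 mins", 0L, ProcessGroupStatus::getBytesReceived);
private <T> void newGroupStatusGauge(final String type, final String name, final T zero,
        final java.util.function.Function<ProcessGroupStatus, T> extractor) {
    metricsRegistry.newGauge(new MetricName(METRICS_GROUP, type, name), new Gauge<T>() {
        @Override
        public T value() {
            final ProcessGroupStatus status = latestStatus.get();
            if (status == null) {
                return zero;
            }
            final T value = extractor.apply(status);
            return (value == null) ? zero : value;
        }
    });
}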

Aggregations

OnScheduled (org.apache.nifi.annotation.lifecycle.OnScheduled): 59
PropertyDescriptor (org.apache.nifi.components.PropertyDescriptor): 12
ProcessException (org.apache.nifi.processor.exception.ProcessException): 12
IOException (java.io.IOException): 10
HashMap (java.util.HashMap): 10
ComponentLog (org.apache.nifi.logging.ComponentLog): 8
SSLContextService (org.apache.nifi.ssl.SSLContextService): 7
Map (java.util.Map): 6
SSLContext (javax.net.ssl.SSLContext): 6
StateMap (org.apache.nifi.components.state.StateMap): 5
FileInputStream (java.io.FileInputStream): 4
PropertyValue (org.apache.nifi.components.PropertyValue): 4
RestrictedSSLContextService (org.apache.nifi.ssl.RestrictedSSLContextService): 4
InetAddress (java.net.InetAddress): 3
CacheLoader (com.google.common.cache.CacheLoader): 2
SslContextBuilder (io.netty.handler.ssl.SslContextBuilder): 2
File (java.io.File): 2
InputStream (java.io.InputStream): 2
InetSocketAddress (java.net.InetSocketAddress): 2
NetworkInterface (java.net.NetworkInterface): 2