Use of org.apache.nifi.components.PropertyValue in project nifi by apache.
The class RouteText, method onTrigger.
@Override
@SuppressWarnings({ "unchecked", "rawtypes" })
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    final FlowFile originalFlowFile = session.get();
    if (originalFlowFile == null) {
        return;
    }
    final ComponentLog logger = getLogger();
    final Charset charset = Charset.forName(context.getProperty(CHARACTER_SET).getValue());
    final boolean trim = context.getProperty(TRIM_WHITESPACE).asBoolean();
    final String routeStrategy = context.getProperty(ROUTE_STRATEGY).getValue();
    final String matchStrategy = context.getProperty(MATCH_STRATEGY).getValue();
    final boolean ignoreCase = context.getProperty(IGNORE_CASE).asBoolean();
    final boolean compileRegex = matchStrategy.equals(matchesRegularExpressionValue) || matchStrategy.equals(containsRegularExpressionValue);
    final boolean usePropValue = matchStrategy.equals(satisfiesExpression);
    // Build up a Map of Relationship to object, where the object is the
    // thing that each line is compared against
    final Map<Relationship, Object> propValueMap;
    final Map<Relationship, PropertyValue> propMap = this.propertyMap;
    if (usePropValue) {
        // If we are using an Expression Language we want a Map where the value is the
        // PropertyValue, so we can just use the 'propMap' - no need to copy it.
        propValueMap = (Map) propMap;
    } else {
        propValueMap = new HashMap<>(propMap.size());
        for (final Map.Entry<Relationship, PropertyValue> entry : propMap.entrySet()) {
            final String value = entry.getValue().evaluateAttributeExpressions(originalFlowFile).getValue();
            propValueMap.put(entry.getKey(), compileRegex ? cachedCompiledPattern(value, ignoreCase) : value);
        }
    }
    final Map<Relationship, Map<Group, FlowFile>> flowFileMap = new HashMap<>();
    final Pattern groupPattern = groupingRegex;
    session.read(originalFlowFile, new InputStreamCallback() {
        @Override
        public void process(final InputStream in) throws IOException {
            try (final Reader inReader = new InputStreamReader(in, charset);
                 final NLKBufferedReader reader = new NLKBufferedReader(inReader)) {
                final Map<String, String> variables = new HashMap<>(2);
                int lineCount = 0;
                String line;
                while ((line = reader.readLine()) != null) {
                    final String matchLine;
                    if (trim) {
                        matchLine = line.trim();
                    } else {
                        // Always trim off the new-line and carriage return characters before evaluating the line.
                        // The NLKBufferedReader maintains these characters so that when we write the line out we can maintain
                        // these characters. However, we don't actually want to match against these characters.
                        final String lineWithoutEndings;
                        final int indexOfCR = line.indexOf("\r");
                        final int indexOfNL = line.indexOf("\n");
                        if (indexOfCR > 0 && indexOfNL > 0) {
                            lineWithoutEndings = line.substring(0, Math.min(indexOfCR, indexOfNL));
                        } else if (indexOfCR > 0) {
                            lineWithoutEndings = line.substring(0, indexOfCR);
                        } else if (indexOfNL > 0) {
                            lineWithoutEndings = line.substring(0, indexOfNL);
                        } else {
                            lineWithoutEndings = line;
                        }
                        matchLine = lineWithoutEndings;
                    }
                    variables.put("line", line);
                    variables.put("lineNo", String.valueOf(++lineCount));
                    int propertiesThatMatchedLine = 0;
                    for (final Map.Entry<Relationship, Object> entry : propValueMap.entrySet()) {
                        boolean lineMatchesProperty = lineMatches(matchLine, entry.getValue(), matchStrategy, ignoreCase, originalFlowFile, variables);
                        if (lineMatchesProperty) {
                            propertiesThatMatchedLine++;
                        }
                        if (lineMatchesProperty && ROUTE_TO_MATCHING_PROPERTY_NAME.getValue().equals(routeStrategy)) {
                            // route each individual line to each Relationship that matches. This one matches.
                            final Relationship relationship = entry.getKey();
                            final Group group = getGroup(matchLine, groupPattern);
                            appendLine(session, flowFileMap, relationship, originalFlowFile, line, charset, group);
                            continue;
                        }
                        // break as soon as possible to avoid calculating things we don't need to calculate.
                        if (lineMatchesProperty && ROUTE_TO_MATCHED_WHEN_ANY_PROPERTY_MATCHES.getValue().equals(routeStrategy)) {
                            break;
                        }
                        if (!lineMatchesProperty && ROUTE_TO_MATCHED_WHEN_ALL_PROPERTIES_MATCH.getValue().equals(routeStrategy)) {
                            break;
                        }
                    }
                    final Relationship relationship;
                    if (ROUTE_TO_MATCHING_PROPERTY_NAME.getValue().equals(routeStrategy) && propertiesThatMatchedLine > 0) {
                        // Set relationship to null so that we do not append the line to each FlowFile again. #appendLine is called
                        // above within the loop, as the line may need to go to multiple different FlowFiles.
                        relationship = null;
                    } else if (ROUTE_TO_MATCHED_WHEN_ANY_PROPERTY_MATCHES.getValue().equals(routeStrategy) && propertiesThatMatchedLine > 0) {
                        relationship = REL_MATCH;
                    } else if (ROUTE_TO_MATCHED_WHEN_ALL_PROPERTIES_MATCH.getValue().equals(routeStrategy) && propertiesThatMatchedLine == propValueMap.size()) {
                        relationship = REL_MATCH;
                    } else {
                        relationship = REL_NO_MATCH;
                    }
                    if (relationship != null) {
                        final Group group = getGroup(matchLine, groupPattern);
                        appendLine(session, flowFileMap, relationship, originalFlowFile, line, charset, group);
                    }
                }
            }
        }
    });
    for (final Map.Entry<Relationship, Map<Group, FlowFile>> entry : flowFileMap.entrySet()) {
        final Relationship relationship = entry.getKey();
        final Map<Group, FlowFile> groupToFlowFileMap = entry.getValue();
        for (final Map.Entry<Group, FlowFile> flowFileEntry : groupToFlowFileMap.entrySet()) {
            final Group group = flowFileEntry.getKey();
            final FlowFile flowFile = flowFileEntry.getValue();
            final Map<String, String> attributes = new HashMap<>(2);
            attributes.put(ROUTE_ATTRIBUTE_KEY, relationship.getName());
            attributes.put(GROUP_ATTRIBUTE_KEY, StringUtils.join(group.getCapturedValues(), ", "));
            logger.info("Created {} from {}; routing to relationship {}", new Object[] { flowFile, originalFlowFile, relationship.getName() });
            FlowFile updatedFlowFile = session.putAllAttributes(flowFile, attributes);
            session.getProvenanceReporter().route(updatedFlowFile, entry.getKey());
            session.transfer(updatedFlowFile, entry.getKey());
        }
    }
    // now transfer the original flow file
    FlowFile flowFile = originalFlowFile;
    logger.info("Routing {} to {}", new Object[] { flowFile, REL_ORIGINAL });
    session.getProvenanceReporter().route(originalFlowFile, REL_ORIGINAL);
    flowFile = session.putAttribute(flowFile, ROUTE_ATTRIBUTE_KEY, REL_ORIGINAL.getName());
    session.transfer(flowFile, REL_ORIGINAL);
}
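The PropertyValue idiom in this method is deferred evaluation: dynamic properties stay as PropertyValue objects in propertyMap and are resolved to strings (or compiled patterns) once per FlowFile, or per line when the "Satisfies Expression" strategy supplies 'line' and 'lineNo' as extra variables. A minimal sketch of both styles, assuming the evaluateAttributeExpressions(FlowFile, Map) overload of the NiFi API is available; the helper class and method names are hypothetical:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.nifi.components.PropertyValue;
    import org.apache.nifi.flowfile.FlowFile;
    import org.apache.nifi.processor.Relationship;

    // Hypothetical helper; illustrates the two evaluation styles used by onTrigger above.
    final class LineExpressionEvaluator {

        // Evaluate each dynamic property once per FlowFile, mirroring the copy made into
        // 'propValueMap' when the match strategy needs no per-line Expression Language.
        static Map<Relationship, String> evaluatePerFlowFile(final Map<Relationship, PropertyValue> propMap, final FlowFile flowFile) {
            final Map<Relationship, String> evaluated = new HashMap<>(propMap.size());
            for (final Map.Entry<Relationship, PropertyValue> entry : propMap.entrySet()) {
                evaluated.put(entry.getKey(), entry.getValue().evaluateAttributeExpressions(flowFile).getValue());
            }
            return evaluated;
        }

        // Evaluate a PropertyValue per line, supplying 'line' and 'lineNo' as additional
        // variables, as the "Satisfies Expression" strategy does for each line read.
        // Assumes the expression yields a boolean result.
        static boolean satisfiesExpression(final PropertyValue expression, final FlowFile flowFile, final String line, final int lineNo) {
            final Map<String, String> variables = new HashMap<>(2);
            variables.put("line", line);
            variables.put("lineNo", String.valueOf(lineNo));
            return expression.evaluateAttributeExpressions(flowFile, variables).asBoolean();
        }
    }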
Use of org.apache.nifi.components.PropertyValue in project nifi by apache.
The class ValidateCsv, method customValidate.
@Override
protected Collection<ValidationResult> customValidate(ValidationContext context) {
    PropertyValue schemaProp = context.getProperty(SCHEMA);
    String schema = schemaProp.getValue();
    String subject = SCHEMA.getName();
    if (context.isExpressionLanguageSupported(subject) && context.isExpressionLanguagePresent(schema)) {
        return Collections.singletonList(new ValidationResult.Builder().subject(subject).input(schema).explanation("Expression Language Present").valid(true).build());
    }
    // If no Expression Language is present, try parsing the schema
    try {
        this.parseSchema(schema);
    } catch (Exception e) {
        final List<ValidationResult> problems = new ArrayList<>(1);
        problems.add(new ValidationResult.Builder().subject(subject).input(schema).valid(false).explanation("Error while parsing the schema: " + e.getMessage()).build());
        return problems;
    }
    return super.customValidate(context);
}
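This short-circuit generalizes to any property that must be parseable but may also contain Expression Language: EL can only be resolved at runtime, so validation provisionally passes whenever EL is present and parsing is deferred. A sketch of the same check made reusable, assuming only the ValidationContext and ValidationResult.Builder APIs shown above; the class name, method name, and Runnable-based parser hook are hypothetical:

    import java.util.Collection;
    import java.util.Collections;

    import org.apache.nifi.components.ValidationContext;
    import org.apache.nifi.components.ValidationResult;

    // Hypothetical reusable check; same short-circuit as customValidate above.
    final class ParseableEl {

        static Collection<ValidationResult> validate(final ValidationContext context, final String subject, final String rawValue, final Runnable parser) {
            // EL can only be resolved at runtime, so report the property provisionally valid.
            if (context.isExpressionLanguageSupported(subject) && context.isExpressionLanguagePresent(rawValue)) {
                return Collections.singletonList(new ValidationResult.Builder()
                        .subject(subject).input(rawValue)
                        .explanation("Expression Language Present").valid(true).build());
            }
            try {
                // No EL present, so the raw value must parse now.
                parser.run();
            } catch (final Exception e) {
                return Collections.singletonList(new ValidationResult.Builder()
                        .subject(subject).input(rawValue).valid(false)
                        .explanation("Error while parsing: " + e.getMessage()).build());
            }
            return Collections.emptyList();
        }
    }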
Use of org.apache.nifi.components.PropertyValue in project nifi by apache.
The class Wait, method onTrigger.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final ComponentLog logger = getLogger();
    // Signal id is computed from attribute 'RELEASE_SIGNAL_IDENTIFIER' with expression language support
    final PropertyValue signalIdProperty = context.getProperty(RELEASE_SIGNAL_IDENTIFIER);
    final Integer bufferCount = context.getProperty(WAIT_BUFFER_COUNT).asInteger();
    final Map<Relationship, List<FlowFile>> processedFlowFiles = new HashMap<>();
    final Function<Relationship, List<FlowFile>> getFlowFilesFor = r -> processedFlowFiles.computeIfAbsent(r, k -> new ArrayList<>());
    final AtomicReference<String> targetSignalId = new AtomicReference<>();
    final AtomicInteger bufferedCount = new AtomicInteger(0);
    final List<FlowFile> failedFilteringFlowFiles = new ArrayList<>();
    final Supplier<FlowFileFilter.FlowFileFilterResult> acceptResultSupplier = () -> bufferedCount.incrementAndGet() == bufferCount ? ACCEPT_AND_TERMINATE : ACCEPT_AND_CONTINUE;
    final List<FlowFile> flowFiles = session.get(f -> {
        final String fSignalId = signalIdProperty.evaluateAttributeExpressions(f).getValue();
        // if the computed value is null or empty, we transfer the FlowFile to the failure relationship
        if (StringUtils.isBlank(fSignalId)) {
            // We can't penalize f before getting it from the session, so keep it in a temporary list.
            logger.error("FlowFile {} has no attribute for given Release Signal Identifier", new Object[] { f });
            failedFilteringFlowFiles.add(f);
            return ACCEPT_AND_CONTINUE;
        }
        final String targetSignalIdStr = targetSignalId.get();
        if (targetSignalIdStr == null) {
            // This is the first one.
            targetSignalId.set(fSignalId);
            return acceptResultSupplier.get();
        }
        if (targetSignalIdStr.equals(fSignalId)) {
            return acceptResultSupplier.get();
        }
        return REJECT_AND_CONTINUE;
    });
    final String attributeCopyMode = context.getProperty(ATTRIBUTE_COPY_MODE).getValue();
    final boolean replaceOriginalAttributes = ATTRIBUTE_COPY_REPLACE.getValue().equals(attributeCopyMode);
    final AtomicReference<Signal> signalRef = new AtomicReference<>();
    // This map contains original counts before those are consumed to release incoming FlowFiles.
    final HashMap<String, Long> originalSignalCounts = new HashMap<>();
    final Consumer<FlowFile> transferToFailure = flowFile -> {
        flowFile = session.penalize(flowFile);
        getFlowFilesFor.apply(REL_FAILURE).add(flowFile);
    };
    final Consumer<Entry<Relationship, List<FlowFile>>> transferFlowFiles = routedFlowFiles -> {
        Relationship relationship = routedFlowFiles.getKey();
        if (REL_WAIT.equals(relationship)) {
            final String waitMode = context.getProperty(WAIT_MODE).getValue();
            if (WAIT_MODE_KEEP_IN_UPSTREAM.getValue().equals(waitMode)) {
                // Transfer to self.
                relationship = Relationship.SELF;
            }
        }
        final List<FlowFile> flowFilesWithSignalAttributes = routedFlowFiles.getValue().stream().map(f -> copySignalAttributes(session, f, signalRef.get(), originalSignalCounts, replaceOriginalAttributes)).collect(Collectors.toList());
        session.transfer(flowFilesWithSignalAttributes, relationship);
    };
    failedFilteringFlowFiles.forEach(f -> {
        flowFiles.remove(f);
        transferToFailure.accept(f);
    });
    if (flowFiles.isEmpty()) {
        // If there was nothing but failed FlowFiles while filtering, transfer those and end immediately.
        processedFlowFiles.entrySet().forEach(transferFlowFiles);
        return;
    }
    // the cache client used to interact with the distributed cache
    final AtomicDistributedMapCacheClient cache = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(AtomicDistributedMapCacheClient.class);
    final WaitNotifyProtocol protocol = new WaitNotifyProtocol(cache);
    final String signalId = targetSignalId.get();
    final Signal signal;
    // get notifying signal
    try {
        signal = protocol.getSignal(signalId);
        if (signal != null) {
            originalSignalCounts.putAll(signal.getCounts());
        }
        signalRef.set(signal);
    } catch (final IOException e) {
        throw new ProcessException(String.format("Failed to get signal for %s due to %s", signalId, e), e);
    }
    String targetCounterName = null;
    long targetCount = 1;
    int releasableFlowFileCount = 1;
    final List<FlowFile> candidates = new ArrayList<>();
    for (FlowFile flowFile : flowFiles) {
        // Set wait start timestamp if it's not set yet
        String waitStartTimestamp = flowFile.getAttribute(WAIT_START_TIMESTAMP);
        if (waitStartTimestamp == null) {
            waitStartTimestamp = String.valueOf(System.currentTimeMillis());
            flowFile = session.putAttribute(flowFile, WAIT_START_TIMESTAMP, waitStartTimestamp);
        }
        long lWaitStartTimestamp;
        try {
            lWaitStartTimestamp = Long.parseLong(waitStartTimestamp);
        } catch (NumberFormatException nfe) {
            logger.error("{} has an invalid value '{}' on FlowFile {}", new Object[] { WAIT_START_TIMESTAMP, waitStartTimestamp, flowFile });
            transferToFailure.accept(flowFile);
            continue;
        }
        // check for expiration
        long expirationDuration = context.getProperty(EXPIRATION_DURATION).asTimePeriod(TimeUnit.MILLISECONDS);
        long now = System.currentTimeMillis();
        if (now > (lWaitStartTimestamp + expirationDuration)) {
            logger.info("FlowFile {} expired after {}ms", new Object[] { flowFile, (now - lWaitStartTimestamp) });
            getFlowFilesFor.apply(REL_EXPIRED).add(flowFile);
            continue;
        }
        // If there's no signal yet, then we don't have to evaluate target counts. Return immediately.
        if (signal == null) {
            if (logger.isDebugEnabled()) {
                logger.debug("No release signal found for {} on FlowFile {} yet", new Object[] { signalId, flowFile });
            }
            getFlowFilesFor.apply(REL_WAIT).add(flowFile);
            continue;
        }
        // Fix target counter name and count from current FlowFile, if those are not set yet.
        if (candidates.isEmpty()) {
            targetCounterName = context.getProperty(SIGNAL_COUNTER_NAME).evaluateAttributeExpressions(flowFile).getValue();
            try {
                targetCount = Long.valueOf(context.getProperty(TARGET_SIGNAL_COUNT).evaluateAttributeExpressions(flowFile).getValue());
            } catch (final NumberFormatException e) {
                transferToFailure.accept(flowFile);
                logger.error("Failed to parse targetCount when processing {} due to {}", new Object[] { flowFile, e }, e);
                continue;
            }
            try {
                releasableFlowFileCount = Integer.valueOf(context.getProperty(RELEASABLE_FLOWFILE_COUNT).evaluateAttributeExpressions(flowFile).getValue());
            } catch (final NumberFormatException e) {
                transferToFailure.accept(flowFile);
                logger.error("Failed to parse releasableFlowFileCount when processing {} due to {}", new Object[] { flowFile, e }, e);
                continue;
            }
        }
        // FlowFile is now validated and added to candidates.
        candidates.add(flowFile);
    }
    boolean waitCompleted = false;
    boolean waitProgressed = false;
    if (signal != null && !candidates.isEmpty()) {
        if (releasableFlowFileCount > 0) {
            signal.releaseCandidates(targetCounterName, targetCount, releasableFlowFileCount, candidates, released -> getFlowFilesFor.apply(REL_SUCCESS).addAll(released), waiting -> getFlowFilesFor.apply(REL_WAIT).addAll(waiting));
            waitCompleted = signal.getTotalCount() == 0 && signal.getReleasableCount() == 0;
            waitProgressed = !getFlowFilesFor.apply(REL_SUCCESS).isEmpty();
        } else {
            boolean reachedTargetCount = StringUtils.isBlank(targetCounterName) ? signal.isTotalCountReached(targetCount) : signal.isCountReached(targetCounterName, targetCount);
            if (reachedTargetCount) {
                getFlowFilesFor.apply(REL_SUCCESS).addAll(candidates);
            } else {
                getFlowFilesFor.apply(REL_WAIT).addAll(candidates);
            }
        }
    }
    // Transfer FlowFiles.
    processedFlowFiles.entrySet().forEach(transferFlowFiles);
    // Update signal if needed.
    try {
        if (waitCompleted) {
            protocol.complete(signalId);
        } else if (waitProgressed) {
            protocol.replace(signal);
        }
    } catch (final IOException e) {
        session.rollback();
        throw new ProcessException(String.format("Unable to communicate with cache while updating %s due to %s", signalId, e), e);
    }
}
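The notable PropertyValue usage here is inside session.get(...): the Release Signal Identifier property is evaluated against each candidate FlowFile's attributes, so one onTrigger invocation only pulls FlowFiles that resolve to a single signal id. A sketch of that filter in isolation, assuming the FlowFileFilter API used above; the class name is hypothetical:

    import java.util.concurrent.atomic.AtomicReference;

    import org.apache.nifi.components.PropertyValue;
    import org.apache.nifi.flowfile.FlowFile;
    import org.apache.nifi.processor.FlowFileFilter;

    import static org.apache.nifi.processor.FlowFileFilter.FlowFileFilterResult.ACCEPT_AND_CONTINUE;
    import static org.apache.nifi.processor.FlowFileFilter.FlowFileFilterResult.REJECT_AND_CONTINUE;

    // Hypothetical filter; the signal id property is evaluated per FlowFile, and only
    // FlowFiles resolving to the same id as the first accepted one join this batch.
    final class SameSignalIdFilter implements FlowFileFilter {
        private final PropertyValue signalIdProperty;
        private final AtomicReference<String> targetSignalId = new AtomicReference<>();

        SameSignalIdFilter(final PropertyValue signalIdProperty) {
            this.signalIdProperty = signalIdProperty;
        }

        @Override
        public FlowFileFilterResult filter(final FlowFile flowFile) {
            final String id = signalIdProperty.evaluateAttributeExpressions(flowFile).getValue();
            if (id == null || id.trim().isEmpty()) {
                // Blank ids are routed to failure by the caller; accept so they leave the queue.
                return ACCEPT_AND_CONTINUE;
            }
            if (targetSignalId.compareAndSet(null, id)) {
                // First non-blank id seen; it becomes the target for this batch.
                return ACCEPT_AND_CONTINUE;
            }
            return targetSignalId.get().equals(id) ? ACCEPT_AND_CONTINUE : REJECT_AND_CONTINUE;
        }
    }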
Use of org.apache.nifi.components.PropertyValue in project nifi by apache.
The class RecordBinManager, method createThresholds.
private RecordBinThresholds createThresholds() {
    final int minRecords = context.getProperty(MergeRecord.MIN_RECORDS).asInteger();
    final int maxRecords = context.getProperty(MergeRecord.MAX_RECORDS).asInteger();
    final long minBytes = context.getProperty(MergeRecord.MIN_SIZE).asDataSize(DataUnit.B).longValue();
    final PropertyValue maxSizeValue = context.getProperty(MergeRecord.MAX_SIZE);
    final long maxBytes = maxSizeValue.isSet() ? maxSizeValue.asDataSize(DataUnit.B).longValue() : Long.MAX_VALUE;
    final PropertyValue maxMillisValue = context.getProperty(MergeRecord.MAX_BIN_AGE);
    final String maxBinAge = maxMillisValue.getValue();
    final long maxBinMillis = maxMillisValue.isSet() ? maxMillisValue.asTimePeriod(TimeUnit.MILLISECONDS).longValue() : Long.MAX_VALUE;
    final String recordCountAttribute;
    final String mergeStrategy = context.getProperty(MergeRecord.MERGE_STRATEGY).getValue();
    if (MergeRecord.MERGE_STRATEGY_DEFRAGMENT.getValue().equals(mergeStrategy)) {
        recordCountAttribute = MergeContent.FRAGMENT_COUNT_ATTRIBUTE;
    } else {
        recordCountAttribute = null;
    }
    return new RecordBinThresholds(minRecords, maxRecords, minBytes, maxBytes, maxBinMillis, maxBinAge, recordCountAttribute);
}
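isSet() is what distinguishes an optional property left blank from one with a value, letting the typed accessors (asDataSize, asTimePeriod) be guarded with a fallback instead of throwing on a null value. A sketch of that idiom extracted into helpers, assuming only the PropertyValue API used above; the class and method names are hypothetical:

    import java.util.concurrent.TimeUnit;

    import org.apache.nifi.components.PropertyValue;
    import org.apache.nifi.processor.DataUnit;

    // Hypothetical helpers; extract the isSet() guard used for MAX_SIZE and MAX_BIN_AGE above.
    final class OptionalProperties {

        // An unset optional size property falls back to "unlimited".
        static long maxBytesOrUnlimited(final PropertyValue maxSize) {
            return maxSize.isSet() ? maxSize.asDataSize(DataUnit.B).longValue() : Long.MAX_VALUE;
        }

        // An unset optional time period property likewise falls back to "unlimited".
        static long maxMillisOrUnlimited(final PropertyValue maxBinAge) {
            return maxBinAge.isSet() ? maxBinAge.asTimePeriod(TimeUnit.MILLISECONDS).longValue() : Long.MAX_VALUE;
        }
    }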
Use of org.apache.nifi.components.PropertyValue in project nifi by apache.
The class SFTPTransfer, method getChannel.
private ChannelSftp getChannel(final FlowFile flowFile) throws IOException {
    if (sftp != null) {
        String sessionhost = session.getHost();
        String desthost = ctx.getProperty(HOSTNAME).evaluateAttributeExpressions(flowFile).getValue();
        if (sessionhost.equals(desthost)) {
            // destination matches, so we can keep our current session
            return sftp;
        } else {
            // this FlowFile is going to a different destination, so reset the session
            close();
        }
    }
    final JSch jsch = new JSch();
    try {
        final String username = ctx.getProperty(USERNAME).evaluateAttributeExpressions(flowFile).getValue();
        final Session session = jsch.getSession(username, ctx.getProperty(HOSTNAME).evaluateAttributeExpressions(flowFile).getValue(), ctx.getProperty(PORT).evaluateAttributeExpressions(flowFile).asInteger().intValue());
        final String hostKeyVal = ctx.getProperty(HOST_KEY_FILE).getValue();
        if (hostKeyVal != null) {
            jsch.setKnownHosts(hostKeyVal);
        }
        final Properties properties = new Properties();
        properties.setProperty("StrictHostKeyChecking", ctx.getProperty(STRICT_HOST_KEY_CHECKING).asBoolean() ? "yes" : "no");
        properties.setProperty("PreferredAuthentications", "publickey,password,keyboard-interactive");
        final PropertyValue compressionValue = ctx.getProperty(FileTransfer.USE_COMPRESSION);
        if (compressionValue != null && "true".equalsIgnoreCase(compressionValue.getValue())) {
            properties.setProperty("compression.s2c", "zlib@openssh.com,zlib,none");
            properties.setProperty("compression.c2s", "zlib@openssh.com,zlib,none");
        } else {
            properties.setProperty("compression.s2c", "none");
            properties.setProperty("compression.c2s", "none");
        }
        session.setConfig(properties);
        final String privateKeyFile = ctx.getProperty(PRIVATE_KEY_PATH).evaluateAttributeExpressions(flowFile).getValue();
        if (privateKeyFile != null) {
            jsch.addIdentity(privateKeyFile, ctx.getProperty(PRIVATE_KEY_PASSPHRASE).evaluateAttributeExpressions(flowFile).getValue());
        }
        final String password = ctx.getProperty(FileTransfer.PASSWORD).evaluateAttributeExpressions(flowFile).getValue();
        if (password != null) {
            session.setPassword(password);
        }
        final int connectionTimeoutMillis = ctx.getProperty(FileTransfer.CONNECTION_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue();
        session.setTimeout(connectionTimeoutMillis);
        session.connect();
        this.session = session;
        this.closed = false;
        sftp = (ChannelSftp) session.openChannel("sftp");
        sftp.connect(connectionTimeoutMillis);
        session.setTimeout(ctx.getProperty(FileTransfer.DATA_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).intValue());
        if (!ctx.getProperty(USE_KEEPALIVE_ON_TIMEOUT).asBoolean()) {
            // do not send a keepalive message on SocketTimeoutException
            session.setServerAliveCountMax(0);
        }
        try {
            this.homeDir = sftp.getHome();
        } catch (SftpException e) {
            // For some combinations of server configuration and user home directory, getHome() can fail with "2: File not found".
            // Since homeDir is only used for the SEND provenance event transit uri, this is harmless. Log and continue.
            logger.debug("Failed to retrieve {} home directory due to {}", new Object[] { username, e.getMessage() });
        }
        return sftp;
    } catch (JSchException e) {
        throw new IOException("Failed to obtain connection to remote host due to " + e.toString(), e);
    }
}
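Throughout this method, PropertyValue results are evaluated per FlowFile and null-checked before use, since optional properties return null from getValue() when unset. A sketch condensing that pattern, assuming the ProcessContext and PropertyDescriptor APIs used above; the helper class and method names are hypothetical:

    import org.apache.nifi.components.PropertyDescriptor;
    import org.apache.nifi.components.PropertyValue;
    import org.apache.nifi.flowfile.FlowFile;
    import org.apache.nifi.processor.ProcessContext;

    // Hypothetical helpers; condense the per-FlowFile evaluation and null-check pattern above.
    final class PropertyReads {

        // Evaluate against the FlowFile's attributes, falling back when the property is unset,
        // as done for HOST_KEY_FILE, PRIVATE_KEY_PATH, and PASSWORD above.
        static String perFlowFileOrDefault(final ProcessContext ctx, final PropertyDescriptor descriptor, final FlowFile flowFile, final String defaultValue) {
            final String value = ctx.getProperty(descriptor).evaluateAttributeExpressions(flowFile).getValue();
            return value != null ? value : defaultValue;
        }

        // Treat a missing or non-"true" value as disabled, as done for USE_COMPRESSION above.
        static boolean flagEnabled(final ProcessContext ctx, final PropertyDescriptor descriptor) {
            final PropertyValue value = ctx.getProperty(descriptor);
            return value != null && "true".equalsIgnoreCase(value.getValue());
        }
    }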