Example 31 with ComponentLog

Use of org.apache.nifi.logging.ComponentLog in project nifi by apache.

From class PutDatabaseRecord, method executeDML.

private void executeDML(ProcessContext context, ProcessSession session, FlowFile flowFile, FunctionContext functionContext, RoutingResult result, Connection con, RecordReader recordParser, String statementType, DMLSettings settings) throws IllegalArgumentException, MalformedRecordException, IOException, SQLException {
    final RecordSchema recordSchema = recordParser.getSchema();
    final ComponentLog log = getLogger();
    final String catalog = context.getProperty(CATALOG_NAME).evaluateAttributeExpressions(flowFile).getValue();
    final String schemaName = context.getProperty(SCHEMA_NAME).evaluateAttributeExpressions(flowFile).getValue();
    final String tableName = context.getProperty(TABLE_NAME).evaluateAttributeExpressions(flowFile).getValue();
    final String updateKeys = context.getProperty(UPDATE_KEYS).evaluateAttributeExpressions(flowFile).getValue();
    final SchemaKey schemaKey = new PutDatabaseRecord.SchemaKey(catalog, schemaName, tableName);
    // Ensure the table name has been set; the generated SQL statements (and TableSchema cache) will need it
    if (StringUtils.isEmpty(tableName)) {
        throw new IllegalArgumentException(format("Cannot process %s because Table Name is null or empty", flowFile));
    }
    // Always get the primary keys if Update Keys is empty. Otherwise, if an INSERT statement is processed first, the table will be
    // cached without its primary keys, and subsequent UPDATE statements will not have them available
    final boolean includePrimaryKeys = updateKeys == null;
    // get the database schema from the cache, if one exists. We do this in a synchronized block, rather than
    // using a ConcurrentMap because the Map that we are using is a LinkedHashMap with a capacity such that if
    // the Map grows beyond this capacity, old elements are evicted. We do this in order to avoid filling the
    // Java Heap if there are a lot of different SQL statements being generated that reference different tables.
    TableSchema tableSchema;
    synchronized (this) {
        tableSchema = schemaCache.get(schemaKey);
        if (tableSchema == null) {
            // No schema exists for this table yet. Query the database to determine the schema and put it into the cache.
            tableSchema = TableSchema.from(con, catalog, schemaName, tableName, settings.translateFieldNames, includePrimaryKeys);
            schemaCache.put(schemaKey, tableSchema);
        }
    }
    if (tableSchema == null) {
        throw new IllegalArgumentException("No table schema specified!");
    }
    // build the fully qualified table name
    final StringBuilder tableNameBuilder = new StringBuilder();
    if (catalog != null) {
        tableNameBuilder.append(catalog).append(".");
    }
    if (schemaName != null) {
        tableNameBuilder.append(schemaName).append(".");
    }
    tableNameBuilder.append(tableName);
    final String fqTableName = tableNameBuilder.toString();
    if (recordSchema == null) {
        throw new IllegalArgumentException("No record schema specified!");
    }
    final SqlAndIncludedColumns sqlHolder;
    if (INSERT_TYPE.equalsIgnoreCase(statementType)) {
        sqlHolder = generateInsert(recordSchema, fqTableName, tableSchema, settings);
    } else if (UPDATE_TYPE.equalsIgnoreCase(statementType)) {
        sqlHolder = generateUpdate(recordSchema, fqTableName, updateKeys, tableSchema, settings);
    } else if (DELETE_TYPE.equalsIgnoreCase(statementType)) {
        sqlHolder = generateDelete(recordSchema, fqTableName, tableSchema, settings);
    } else {
        throw new IllegalArgumentException(format("Statement Type %s is not valid, FlowFile %s", statementType, flowFile));
    }
    try (PreparedStatement ps = con.prepareStatement(sqlHolder.getSql())) {
        final int queryTimeout = functionContext.queryTimeout;
        try {
            // timeout in seconds
            ps.setQueryTimeout(queryTimeout);
        } catch (SQLException se) {
            // Not all drivers support query timeouts; if setting one fails, treat the timeout as "infinite"
            // and tolerate the failure only when the requested timeout was zero
            if (queryTimeout > 0) {
                throw se;
            }
        }
        Record currentRecord;
        List<Integer> fieldIndexes = sqlHolder.getFieldIndexes();
        while ((currentRecord = recordParser.nextRecord()) != null) {
            Object[] values = currentRecord.getValues();
            if (values != null) {
                if (fieldIndexes != null) {
                    for (int i = 0; i < fieldIndexes.size(); i++) {
                        // For DELETE statements, bind each value to two consecutive placeholders, because generateDelete
                        // emits a null-safe comparison with two parameters per column (see generateDelete for details)
                        if (DELETE_TYPE.equalsIgnoreCase(statementType)) {
                            ps.setObject(i * 2 + 1, values[fieldIndexes.get(i)]);
                            ps.setObject(i * 2 + 2, values[fieldIndexes.get(i)]);
                        } else {
                            ps.setObject(i + 1, values[fieldIndexes.get(i)]);
                        }
                    }
                } else {
                    // If there's no index map, assume all values are included and set them in order
                    for (int i = 0; i < values.length; i++) {
                        // For DELETE statements, bind each value to two consecutive placeholders, because generateDelete
                        // emits a null-safe comparison with two parameters per column (see generateDelete for details)
                        if (DELETE_TYPE.equalsIgnoreCase(statementType)) {
                            ps.setObject(i * 2 + 1, values[i]);
                            ps.setObject(i * 2 + 2, values[i]);
                        } else {
                            ps.setObject(i + 1, values[i]);
                        }
                    }
                }
                ps.addBatch();
            }
        }
        log.debug("Executing query {}", new Object[] { sqlHolder });
        ps.executeBatch();
        result.routeTo(flowFile, REL_SUCCESS);
        session.getProvenanceReporter().send(flowFile, functionContext.jdbcUrl);
    }
}
Also used : SQLException(java.sql.SQLException) PreparedStatement(java.sql.PreparedStatement) ComponentLog(org.apache.nifi.logging.ComponentLog) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Record(org.apache.nifi.serialization.record.Record) RecordSchema(org.apache.nifi.serialization.record.RecordSchema)
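
The synchronized block above exists because, as the inline comment explains, the schema cache is a LinkedHashMap sized so that old entries are evicted once it grows past a fixed capacity. Below is a minimal sketch of that kind of size-bounded LRU cache; the BoundedLruCache name and the capacity parameter are illustrative, not part of the NiFi source.

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative size-bounded cache in the style described by the comment in executeDML:
// a LinkedHashMap that evicts its eldest entry once the configured capacity is exceeded.
public class BoundedLruCache<K, V> extends LinkedHashMap<K, V> {

    private final int capacity;

    public BoundedLruCache(final int capacity) {
        // accessOrder = true makes iteration order least-recently-accessed first (LRU)
        super(16, 0.75f, true);
        this.capacity = capacity;
    }

    @Override
    protected boolean removeEldestEntry(final Map.Entry<K, V> eldest) {
        // Evict the least-recently-used entry so the heap is not filled when many
        // distinct tables (and therefore schemas) pass through the processor.
        return size() > capacity;
    }
}

Because LinkedHashMap is not thread-safe, callers must guard get and put with external synchronization, which is exactly what the synchronized block in executeDML provides.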

Example 32 with ComponentLog

Use of org.apache.nifi.logging.ComponentLog in project nifi by apache.

From class PutDistributedMapCache, method onTrigger.

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final ComponentLog logger = getLogger();
    // cache key is computed from attribute 'CACHE_ENTRY_IDENTIFIER' with expression language support
    final String cacheKey = context.getProperty(CACHE_ENTRY_IDENTIFIER).evaluateAttributeExpressions(flowFile).getValue();
    // if the computed value is null or empty, route the flow file to the failure relationship
    if (StringUtils.isBlank(cacheKey)) {
        logger.error("FlowFile {} has no attribute for given Cache Entry Identifier", new Object[] { flowFile });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        return;
    }
    // the cache client used to interact with the distributed cache
    final DistributedMapCacheClient cache = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(DistributedMapCacheClient.class);
    try {
        final long maxCacheEntrySize = context.getProperty(CACHE_ENTRY_MAX_BYTES).asDataSize(DataUnit.B).longValue();
        long flowFileSize = flowFile.getSize();
        // the flow file is too large to cache
        if (flowFileSize > maxCacheEntrySize) {
            logger.warn("Flow file {} size {} exceeds the max cache entry size ({} B).", new Object[] { flowFile, flowFileSize, maxCacheEntrySize });
            session.transfer(flowFile, REL_FAILURE);
            return;
        }
        if (flowFileSize == 0) {
            logger.warn("Flow file {} is empty, there is nothing to cache.", new Object[] { flowFile });
            session.transfer(flowFile, REL_FAILURE);
            return;
        }
        // get flow file content
        final ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
        session.exportTo(flowFile, byteStream);
        byte[] cacheValue = byteStream.toByteArray();
        final String updateStrategy = context.getProperty(CACHE_UPDATE_STRATEGY).getValue();
        boolean cached = false;
        if (updateStrategy.equals(CACHE_UPDATE_REPLACE.getValue())) {
            cache.put(cacheKey, cacheValue, keySerializer, valueSerializer);
            cached = true;
        } else if (updateStrategy.equals(CACHE_UPDATE_KEEP_ORIGINAL.getValue())) {
            final byte[] oldValue = cache.getAndPutIfAbsent(cacheKey, cacheValue, keySerializer, valueSerializer, valueDeserializer);
            if (oldValue == null) {
                cached = true;
            }
        }
        // set 'cached' attribute
        flowFile = session.putAttribute(flowFile, CACHED_ATTRIBUTE_NAME, String.valueOf(cached));
        if (cached) {
            session.transfer(flowFile, REL_SUCCESS);
        } else {
            session.transfer(flowFile, REL_FAILURE);
        }
    } catch (final IOException e) {
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
        logger.error("Unable to communicate with cache when processing {} due to {}", new Object[] { flowFile, e });
    }
}
Also used : FlowFile(org.apache.nifi.flowfile.FlowFile) DistributedMapCacheClient(org.apache.nifi.distributed.cache.client.DistributedMapCacheClient) ByteArrayOutputStream(java.io.ByteArrayOutputStream) IOException(java.io.IOException) ComponentLog(org.apache.nifi.logging.ComponentLog)
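
The keySerializer, valueSerializer, and valueDeserializer passed to put and getAndPutIfAbsent are defined elsewhere in PutDistributedMapCache. A minimal sketch of what such implementations could look like against the DistributedMapCacheClient serializer interfaces; class names and null handling here are illustrative.

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.nifi.distributed.cache.client.Deserializer;
import org.apache.nifi.distributed.cache.client.Serializer;
import org.apache.nifi.distributed.cache.client.exception.DeserializationException;
import org.apache.nifi.distributed.cache.client.exception.SerializationException;

// Serializes the cache key as UTF-8 bytes.
class StringKeySerializer implements Serializer<String> {
    @Override
    public void serialize(final String value, final OutputStream output) throws SerializationException, IOException {
        output.write(value.getBytes(StandardCharsets.UTF_8));
    }
}

// Writes the FlowFile content (already a byte array) through unchanged.
class ByteArrayValueSerializer implements Serializer<byte[]> {
    @Override
    public void serialize(final byte[] value, final OutputStream output) throws SerializationException, IOException {
        output.write(value);
    }
}

// Returns the cached bytes as-is; null or empty input means no previous value existed.
class ByteArrayValueDeserializer implements Deserializer<byte[]> {
    @Override
    public byte[] deserialize(final byte[] input) throws DeserializationException, IOException {
        return (input == null || input.length == 0) ? null : input;
    }
}

With getAndPutIfAbsent, a null return means nothing was previously cached under the key, which is why the example treats oldValue == null as a successful cache write.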

Example 33 with ComponentLog

Use of org.apache.nifi.logging.ComponentLog in project nifi by apache.

From class PutEmail, method getMailPropertiesFromFlowFile.

/**
 * Uses the mapping of javax.mail properties to NiFi PropertyDescriptors to build the required Properties object to be used for sending this email
 *
 * @param context context
 * @param flowFile flowFile
 * @return mail properties
 */
private Properties getMailPropertiesFromFlowFile(final ProcessContext context, final FlowFile flowFile) {
    final Properties properties = new Properties();
    final ComponentLog logger = this.getLogger();
    for (Entry<String, PropertyDescriptor> entry : propertyToContext.entrySet()) {
        // Evaluate the property descriptor against the flow file
        String flowFileValue = context.getProperty(entry.getValue()).evaluateAttributeExpressions(flowFile).getValue();
        String property = entry.getKey();
        logger.debug("Evaluated Mail Property: {} with Value: {}", new Object[] { property, flowFileValue });
        // Null values are not allowed, so filter them out
        if (null != flowFileValue) {
            properties.setProperty(property, flowFileValue);
        }
    }
    return properties;
}
Also used : PropertyDescriptor(org.apache.nifi.components.PropertyDescriptor) Properties(java.util.Properties) ComponentLog(org.apache.nifi.logging.ComponentLog)
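
The propertyToContext map iterated above pairs javax.mail property names with the NiFi PropertyDescriptors whose expression-language-evaluated values should populate them. A minimal sketch of such a mapping follows; the descriptor shown is hypothetical, since PutEmail defines its own set of SMTP descriptors.

import java.util.HashMap;
import java.util.Map;
import org.apache.nifi.components.PropertyDescriptor;
import org.apache.nifi.processor.util.StandardValidators;

public class MailPropertyMappingSketch {

    // Hypothetical descriptor standing in for PutEmail's SMTP hostname property.
    static final PropertyDescriptor SMTP_HOSTNAME = new PropertyDescriptor.Builder()
            .name("SMTP Hostname")
            .required(true)
            .expressionLanguageSupported(true)
            .addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
            .build();

    // Keys are javax.mail session property names; values are the descriptors to evaluate
    // per FlowFile, which is exactly the shape getMailPropertiesFromFlowFile expects.
    static final Map<String, PropertyDescriptor> propertyToContext = new HashMap<>();
    static {
        propertyToContext.put("mail.smtp.host", SMTP_HOSTNAME);
    }
}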

Example 34 with ComponentLog

Use of org.apache.nifi.logging.ComponentLog in project nifi by apache.

From class PutFile, method onTrigger.

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final StopWatch stopWatch = new StopWatch(true);
    final Path configuredRootDirPath = Paths.get(context.getProperty(DIRECTORY).evaluateAttributeExpressions(flowFile).getValue());
    final String conflictResponse = context.getProperty(CONFLICT_RESOLUTION).getValue();
    final Integer maxDestinationFiles = context.getProperty(MAX_DESTINATION_FILES).asInteger();
    final ComponentLog logger = getLogger();
    Path tempDotCopyFile = null;
    try {
        final Path rootDirPath = configuredRootDirPath;
        final Path tempCopyFile = rootDirPath.resolve("." + flowFile.getAttribute(CoreAttributes.FILENAME.key()));
        final Path copyFile = rootDirPath.resolve(flowFile.getAttribute(CoreAttributes.FILENAME.key()));
        if (!Files.exists(rootDirPath)) {
            if (context.getProperty(CREATE_DIRS).asBoolean()) {
                Files.createDirectories(rootDirPath);
            } else {
                flowFile = session.penalize(flowFile);
                session.transfer(flowFile, REL_FAILURE);
                logger.error("Penalizing {} and routing to 'failure' because the output directory {} does not exist and Processor is " + "configured not to create missing directories", new Object[] { flowFile, rootDirPath });
                return;
            }
        }
        final Path dotCopyFile = tempCopyFile;
        tempDotCopyFile = dotCopyFile;
        Path finalCopyFile = copyFile;
        final Path finalCopyFileDir = finalCopyFile.getParent();
        if (Files.exists(finalCopyFileDir) && maxDestinationFiles != null) {
            // check if too many files already
            final int numFiles = finalCopyFileDir.toFile().list().length;
            if (numFiles >= maxDestinationFiles) {
                flowFile = session.penalize(flowFile);
                logger.warn("Penalizing {} and routing to 'failure' because the output directory {} has {} files, which exceeds the " + "configured maximum number of files", new Object[] { flowFile, finalCopyFileDir, numFiles });
                session.transfer(flowFile, REL_FAILURE);
                return;
            }
        }
        if (Files.exists(finalCopyFile)) {
            switch(conflictResponse) {
                case REPLACE_RESOLUTION:
                    Files.delete(finalCopyFile);
                    logger.info("Deleted {} as configured in order to replace with the contents of {}", new Object[] { finalCopyFile, flowFile });
                    break;
                case IGNORE_RESOLUTION:
                    session.transfer(flowFile, REL_SUCCESS);
                    logger.info("Transferring {} to success because file with same name already exists", new Object[] { flowFile });
                    return;
                case FAIL_RESOLUTION:
                    flowFile = session.penalize(flowFile);
                    logger.warn("Penalizing {} and routing to failure as configured because file with the same name already exists", new Object[] { flowFile });
                    session.transfer(flowFile, REL_FAILURE);
                    return;
                default:
                    break;
            }
        }
        session.exportTo(flowFile, dotCopyFile, false);
        final String lastModifiedTime = context.getProperty(CHANGE_LAST_MODIFIED_TIME).evaluateAttributeExpressions(flowFile).getValue();
        if (lastModifiedTime != null && !lastModifiedTime.trim().isEmpty()) {
            try {
                final DateFormat formatter = new SimpleDateFormat(FILE_MODIFY_DATE_ATTR_FORMAT, Locale.US);
                final Date fileModifyTime = formatter.parse(lastModifiedTime);
                dotCopyFile.toFile().setLastModified(fileModifyTime.getTime());
            } catch (Exception e) {
                logger.warn("Could not set file lastModifiedTime to {} because {}", new Object[] { lastModifiedTime, e });
            }
        }
        final String permissions = context.getProperty(CHANGE_PERMISSIONS).evaluateAttributeExpressions(flowFile).getValue();
        if (permissions != null && !permissions.trim().isEmpty()) {
            try {
                String perms = stringPermissions(permissions);
                if (!perms.isEmpty()) {
                    Files.setPosixFilePermissions(dotCopyFile, PosixFilePermissions.fromString(perms));
                }
            } catch (Exception e) {
                logger.warn("Could not set file permissions to {} because {}", new Object[] { permissions, e });
            }
        }
        final String owner = context.getProperty(CHANGE_OWNER).evaluateAttributeExpressions(flowFile).getValue();
        if (owner != null && !owner.trim().isEmpty()) {
            try {
                UserPrincipalLookupService lookupService = dotCopyFile.getFileSystem().getUserPrincipalLookupService();
                Files.setOwner(dotCopyFile, lookupService.lookupPrincipalByName(owner));
            } catch (Exception e) {
                logger.warn("Could not set file owner to {} because {}", new Object[] { owner, e });
            }
        }
        final String group = context.getProperty(CHANGE_GROUP).evaluateAttributeExpressions(flowFile).getValue();
        if (group != null && !group.trim().isEmpty()) {
            try {
                UserPrincipalLookupService lookupService = dotCopyFile.getFileSystem().getUserPrincipalLookupService();
                PosixFileAttributeView view = Files.getFileAttributeView(dotCopyFile, PosixFileAttributeView.class);
                view.setGroup(lookupService.lookupPrincipalByGroupName(group));
            } catch (Exception e) {
                logger.warn("Could not set file group to {} because {}", new Object[] { group, e });
            }
        }
        boolean renamed = false;
        for (int i = 0; i < 10; i++) {
            // try rename up to 10 times.
            if (dotCopyFile.toFile().renameTo(finalCopyFile.toFile())) {
                renamed = true;
                // rename was successful
                break;
            }
            // wait a few milliseconds to give whatever caused the rename failure a chance to resolve
            Thread.sleep(100L);
        }
        if (!renamed) {
            if (Files.exists(dotCopyFile) && dotCopyFile.toFile().delete()) {
                logger.debug("Deleted dot copy file {}", new Object[] { dotCopyFile });
            }
            throw new ProcessException("Could not rename: " + dotCopyFile);
        } else {
            logger.info("Produced copy of {} at location {}", new Object[] { flowFile, finalCopyFile });
        }
        session.getProvenanceReporter().send(flowFile, finalCopyFile.toFile().toURI().toString(), stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(flowFile, REL_SUCCESS);
    } catch (final Throwable t) {
        if (tempDotCopyFile != null) {
            try {
                Files.deleteIfExists(tempDotCopyFile);
            } catch (final Exception e) {
                logger.error("Unable to remove temporary file {} due to {}", new Object[] { tempDotCopyFile, e });
            }
        }
        flowFile = session.penalize(flowFile);
        logger.error("Penalizing {} and transferring to failure due to {}", new Object[] { flowFile, t });
        session.transfer(flowFile, REL_FAILURE);
    }
}
Also used : Path(java.nio.file.Path) UserPrincipalLookupService(java.nio.file.attribute.UserPrincipalLookupService) FlowFile(org.apache.nifi.flowfile.FlowFile) ComponentLog(org.apache.nifi.logging.ComponentLog) Date(java.util.Date) ProcessException(org.apache.nifi.processor.exception.ProcessException) StopWatch(org.apache.nifi.util.StopWatch) PosixFileAttributeView(java.nio.file.attribute.PosixFileAttributeView) SimpleDateFormat(java.text.SimpleDateFormat) DateFormat(java.text.DateFormat)
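
The stringPermissions helper called above is not part of this excerpt. A hypothetical stand-in is sketched below, assuming the property accepts either nine-character symbolic notation (for example rw-r--r--) or three octal digits (for example 644); the symbolic form is what PosixFilePermissions.fromString requires.

// Hypothetical stand-in for PutFile's stringPermissions helper: normalizes the configured
// value into the nine-character rwx form that PosixFilePermissions.fromString expects.
// Returns an empty string when the input matches neither supported format.
static String stringPermissions(final String rawValue) {
    final String value = rawValue.trim();
    if (value.matches("[rwx-]{9}")) {
        // already symbolic, e.g. "rw-r--r--"
        return value;
    }
    if (value.matches("[0-7]{3}")) {
        // expand each octal digit into an rwx triplet, e.g. 6 -> "rw-"
        final StringBuilder sb = new StringBuilder(9);
        for (final char c : value.toCharArray()) {
            final int digit = c - '0';
            sb.append((digit & 4) != 0 ? 'r' : '-');
            sb.append((digit & 2) != 0 ? 'w' : '-');
            sb.append((digit & 1) != 0 ? 'x' : '-');
        }
        return sb.toString();
    }
    return "";
}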

Example 35 with ComponentLog

Use of org.apache.nifi.logging.ComponentLog in project nifi by apache.

From class PutFileTransfer, method onTrigger.

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final ComponentLog logger = getLogger();
    final String hostname = context.getProperty(FileTransfer.HOSTNAME).evaluateAttributeExpressions(flowFile).getValue();
    final int maxNumberOfFiles = context.getProperty(FileTransfer.BATCH_SIZE).asInteger();
    int fileCount = 0;
    try (final T transfer = getFileTransfer(context)) {
        do {
            final String rootPath = context.getProperty(FileTransfer.REMOTE_PATH).evaluateAttributeExpressions(flowFile).getValue();
            final String workingDirPath;
            if (rootPath == null) {
                workingDirPath = null;
            } else {
                File workingDirectory = new File(rootPath);
                if (!workingDirectory.getPath().startsWith("/") && !workingDirectory.getPath().startsWith("\\")) {
                    workingDirectory = new File(transfer.getHomeDirectory(flowFile), workingDirectory.getPath());
                }
                workingDirPath = workingDirectory.getPath().replace("\\", "/");
            }
            final boolean rejectZeroByteFiles = context.getProperty(FileTransfer.REJECT_ZERO_BYTE).asBoolean();
            final ConflictResult conflictResult = identifyAndResolveConflictFile(context.getProperty(FileTransfer.CONFLICT_RESOLUTION).getValue(), transfer, workingDirPath, flowFile, rejectZeroByteFiles, logger);
            if (conflictResult.isTransfer()) {
                final StopWatch stopWatch = new StopWatch();
                stopWatch.start();
                beforePut(flowFile, context, transfer);
                final FlowFile flowFileToTransfer = flowFile;
                final AtomicReference<String> fullPathRef = new AtomicReference<>(null);
                session.read(flowFile, new InputStreamCallback() {

                    @Override
                    public void process(final InputStream in) throws IOException {
                        try (final InputStream bufferedIn = new BufferedInputStream(in)) {
                            if (workingDirPath != null && context.getProperty(SFTPTransfer.CREATE_DIRECTORY).asBoolean()) {
                                transfer.ensureDirectoryExists(flowFileToTransfer, new File(workingDirPath));
                            }
                            fullPathRef.set(transfer.put(flowFileToTransfer, workingDirPath, conflictResult.getFileName(), bufferedIn));
                        }
                    }
                });
                afterPut(flowFile, context, transfer);
                stopWatch.stop();
                final String dataRate = stopWatch.calculateDataRate(flowFile.getSize());
                final long millis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
                logger.info("Successfully transferred {} to {} on remote host {} in {} milliseconds at a rate of {}", new Object[] { flowFile, fullPathRef.get(), hostname, millis, dataRate });
                String fullPathWithSlash = fullPathRef.get();
                if (!fullPathWithSlash.startsWith("/")) {
                    fullPathWithSlash = "/" + fullPathWithSlash;
                }
                final String destinationUri = transfer.getProtocolName() + "://" + hostname + fullPathWithSlash;
                session.getProvenanceReporter().send(flowFile, destinationUri, millis);
            }
            if (conflictResult.isPenalize()) {
                flowFile = session.penalize(flowFile);
            }
            session.transfer(flowFile, conflictResult.getRelationship());
            session.commit();
        } while (isScheduled() && (getRelationships().size() == context.getAvailableRelationships().size()) && (++fileCount < maxNumberOfFiles) && ((flowFile = session.get()) != null));
    } catch (final IOException e) {
        context.yield();
        logger.error("Unable to transfer {} to remote host {} due to {}", new Object[] { flowFile, hostname, e });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    } catch (final FlowFileAccessException e) {
        context.yield();
        logger.error("Unable to transfer {} to remote host {} due to {}", new Object[] { flowFile, hostname, e.getCause() });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    } catch (final ProcessException e) {
        context.yield();
        logger.error("Unable to transfer {} to remote host {} due to {}: {}; routing to failure", new Object[] { flowFile, hostname, e, e.getCause() });
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_FAILURE);
    }
}
Also used : FlowFile(org.apache.nifi.flowfile.FlowFile) FlowFileAccessException(org.apache.nifi.processor.exception.FlowFileAccessException) BufferedInputStream(org.apache.nifi.stream.io.BufferedInputStream) InputStream(java.io.InputStream) AtomicReference(java.util.concurrent.atomic.AtomicReference) IOException(java.io.IOException) ComponentLog(org.apache.nifi.logging.ComponentLog) StopWatch(org.apache.nifi.util.StopWatch) ProcessException(org.apache.nifi.processor.exception.ProcessException) InputStreamCallback(org.apache.nifi.processor.io.InputStreamCallback) File(java.io.File)
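
Both this example and the PutFile example above follow the same timing pattern: start a StopWatch around the transfer, then derive a duration and a data rate for the log message and the provenance event. A self-contained sketch of that pattern; the sleep and payload size are placeholders for a real transfer.

import java.util.concurrent.TimeUnit;
import org.apache.nifi.util.StopWatch;

public class StopWatchSketch {
    public static void main(final String[] args) throws InterruptedException {
        final StopWatch stopWatch = new StopWatch();
        stopWatch.start();
        Thread.sleep(50); // placeholder for the actual file transfer
        stopWatch.stop();
        final long bytesTransferred = 1024L * 1024L; // hypothetical payload size
        final String dataRate = stopWatch.calculateDataRate(bytesTransferred);
        final long millis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
        // Matches the shape of the log line and the provenance send() call in onTrigger above.
        System.out.println("transferred in " + millis + " ms at " + dataRate);
    }
}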

Aggregations

ComponentLog (org.apache.nifi.logging.ComponentLog): 211
FlowFile (org.apache.nifi.flowfile.FlowFile): 111
ProcessException (org.apache.nifi.processor.exception.ProcessException): 95
IOException (java.io.IOException): 94
HashMap (java.util.HashMap): 51
Map (java.util.Map): 47
InputStream (java.io.InputStream): 46
ArrayList (java.util.ArrayList): 44
PropertyDescriptor (org.apache.nifi.components.PropertyDescriptor): 40
HashSet (java.util.HashSet): 33
ProcessSession (org.apache.nifi.processor.ProcessSession): 32
List (java.util.List): 28
ProcessContext (org.apache.nifi.processor.ProcessContext): 28
Relationship (org.apache.nifi.processor.Relationship): 28
StopWatch (org.apache.nifi.util.StopWatch): 28
OutputStream (java.io.OutputStream): 27
InputStreamCallback (org.apache.nifi.processor.io.InputStreamCallback): 27
Set (java.util.Set): 23
Collections (java.util.Collections): 21
AtomicReference (java.util.concurrent.atomic.AtomicReference): 21