Use of org.apache.nifi.util.StopWatch in project kylo by Teradata.
The class AbstractMergeTable, method onTrigger:
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final ComponentLog logger = getLog();
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final String blockingValue = context.getProperty(BLOCKING_KEY).evaluateAttributeExpressions(flowFile).getValue();
    String flowFileId = flowFile.getAttribute(CoreAttributes.UUID.key());
    boolean block = false;
    if (blocking && blockingCache.putIfAbsent(blockingValue, flowFileId) != null) {
        if (StringUtils.isBlank(flowFile.getAttribute(BLOCKED_START_TIME))) {
            flowFile = session.putAttribute(flowFile, BLOCKED_START_TIME, String.valueOf(System.currentTimeMillis()));
            getLogger().info("Transferring Flow file {} to blocked relationship", new Object[] { flowFile });
        }
        // penalize the flow file and transfer to BLOCKED
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_BLOCKED);
        return;
    }
    // Add Blocking time to flow file if this was a blocked flowfile.
    if (blocking && StringUtils.isNotBlank(flowFile.getAttribute(BLOCKED_START_TIME))) {
        String blockedStartTime = flowFile.getAttribute(BLOCKED_START_TIME);
        try {
            Long l = Long.parseLong(blockedStartTime);
            Long blockTime = System.currentTimeMillis() - l;
            getLogger().info("Processing Blocked flow file {}. This was blocked for {} ms", new Object[] { flowFile, blockTime });
            flowFile = session.putAttribute(flowFile, BLOCKED_TIME, String.valueOf(blockTime) + " ms");
        } catch (NumberFormatException e) {
        }
    }
    String PROVENANCE_EXECUTION_STATUS_KEY = context.getName() + " Execution Status";
    String partitionSpecString = context.getProperty(PARTITION_SPECIFICATION).evaluateAttributeExpressions(flowFile).getValue();
    String sourceSchema = context.getProperty(SOURCE_SCHEMA).evaluateAttributeExpressions(flowFile).getValue();
    String sourceTable = context.getProperty(SOURCE_TABLE).evaluateAttributeExpressions(flowFile).getValue();
    String targetSchema = context.getProperty(TARGET_SCHEMA).evaluateAttributeExpressions(flowFile).getValue();
    String targetTable = context.getProperty(TARGET_TABLE).evaluateAttributeExpressions(flowFile).getValue();
    String feedPartitionValue = context.getProperty(FEED_PARTITION).evaluateAttributeExpressions(flowFile).getValue();
    String mergeStrategyValue = context.getProperty(MERGE_STRATEGY).evaluateAttributeExpressions(flowFile).getValue();
    String hiveConfigurations = context.getProperty(HIVE_CONFIGURATIONS).evaluateAttributeExpressions(flowFile).getValue();
    boolean resetHive = context.getProperty(RESET_HIVE).asBoolean();
    final ColumnSpec[] columnSpecs = Optional.ofNullable(context.getProperty(FIELD_SPECIFICATION).evaluateAttributeExpressions(flowFile).getValue()).filter(StringUtils::isNotEmpty).map(ColumnSpec::createFromString).orElse(new ColumnSpec[0]);
    if (STRATEGY_PK_MERGE.equals(mergeStrategyValue) && (columnSpecs == null || columnSpecs.length == 0)) {
        getLog().error("Missing required field specification for PK merge feature");
        flowFile = session.putAttribute(flowFile, PROVENANCE_EXECUTION_STATUS_KEY, "Failed: Missing required field specification for PK merge feature");
        release(blockingValue);
        session.transfer(flowFile, IngestProperties.REL_FAILURE);
        return;
    }
    // Maintain default for backward compatibility
    if (StringUtils.isEmpty(mergeStrategyValue)) {
        mergeStrategyValue = STRATEGY_DEDUPE_MERGE;
    }
    logger.info("Merge strategy: " + mergeStrategyValue + " Using Source: " + sourceTable + " Target: " + targetTable + " feed partition:" + feedPartitionValue + " partSpec: " + partitionSpecString);
    final StopWatch stopWatch = new StopWatch(true);
    try (final Connection conn = getConnection(context)) {
        TableMergeSyncSupport mergeSupport = new TableMergeSyncSupport(conn);
        if (resetHive) {
            mergeSupport.resetHiveConf();
        }
        mergeSupport.enableDynamicPartitions();
        if (StringUtils.isNotEmpty(hiveConfigurations)) {
            mergeSupport.setHiveConf(hiveConfigurations.split("\\|"));
        }
        PartitionSpec partitionSpec = new PartitionSpec(partitionSpecString);
        if (STRATEGY_DEDUPE_MERGE.equals(mergeStrategyValue)) {
            mergeSupport.doMerge(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue, true);
        } else if (STRATEGY_MERGE.equals(mergeStrategyValue)) {
            mergeSupport.doMerge(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue, false);
        } else if (STRATEGY_SYNC.equals(mergeStrategyValue)) {
            mergeSupport.doSync(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue);
        } else if (STRATEGY_ROLLING_SYNC.equals(mergeStrategyValue)) {
            mergeSupport.doRollingSync(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue);
        } else if (STRATEGY_PK_MERGE.equals(mergeStrategyValue)) {
            mergeSupport.doPKMerge(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue, columnSpecs);
        } else {
            throw new UnsupportedOperationException("Failed to resolve the merge strategy");
        }
        stopWatch.stop();
        session.getProvenanceReporter().modifyContent(flowFile, "Execution completed", stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        flowFile = session.putAttribute(flowFile, PROVENANCE_EXECUTION_STATUS_KEY, "Successful");
        release(blockingValue);
        session.transfer(flowFile, REL_SUCCESS);
    } catch (final Exception e) {
        logger.error("Unable to execute merge doMerge for {} due to {}; routing to failure", new Object[] { flowFile, e }, e);
        flowFile = session.putAttribute(flowFile, PROVENANCE_EXECUTION_STATUS_KEY, "Failed: " + e.getMessage());
        release(blockingValue);
        session.transfer(flowFile, REL_FAILURE);
    }
}
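In this snippet the StopWatch is auto-started by passing true to its constructor, stopped once the merge finishes, and its elapsed milliseconds are handed to the provenance reporter. Below is a minimal standalone sketch of just that timing pattern; the class name and the Thread.sleep are placeholders standing in for the actual merge work, and it assumes only that nifi-utils is on the classpath.

import java.util.concurrent.TimeUnit;

import org.apache.nifi.util.StopWatch;

public class MergeTimingSketch {

    public static void main(String[] args) throws InterruptedException {
        // Passing true starts the watch at construction time, so measurement
        // begins before the connection is obtained and the merge runs.
        final StopWatch stopWatch = new StopWatch(true);

        Thread.sleep(250); // stand-in for mergeSupport.doMerge(...) / doSync(...) etc.

        stopWatch.stop();
        // AbstractMergeTable passes this value to session.getProvenanceReporter().modifyContent(...)
        System.out.println("Merge completed in " + stopWatch.getElapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}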
Use of org.apache.nifi.util.StopWatch in project kylo by Teradata.
The class ExportSqoop, method onTrigger:
@Override
public void onTrigger(ProcessContext context, ProcessSession session) throws ProcessException {
    final ComponentLog logger = getLog();
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        flowFile = session.create();
        logger.info("Created a flow file having uuid: {}", new Object[] { flowFile.getAttribute(CoreAttributes.UUID.key()) });
    } else {
        logger.info("Using an existing flow file having uuid: {}", new Object[] { flowFile.getAttribute(CoreAttributes.UUID.key()) });
    }
    final String kerberosPrincipal = context.getProperty(KERBEROS_PRINCIPAL).getValue();
    final String kerberosKeyTab = context.getProperty(KERBEROS_KEYTAB).getValue();
    final SqoopConnectionService sqoopConnectionService = context.getProperty(SQOOP_CONNECTION_SERVICE).asControllerService(SqoopConnectionService.class);
    final String sourceHdfsDirectory = context.getProperty(SOURCE_HDFS_DIRECTORY).evaluateAttributeExpressions(flowFile).getValue();
    final String sourceHdfsFileDelimiter = context.getProperty(SOURCE_HDFS_FILE_DELIMITER).evaluateAttributeExpressions(flowFile).getValue();
    final ExportNullInterpretationStrategy sourceNullInterpretationStrategy = ExportNullInterpretationStrategy.valueOf(context.getProperty(SOURCE_NULL_INTERPRETATION_STRATEGY).getValue());
    final String sourceNullCustomStringIdentifier = context.getProperty(SOURCE_NULL_CUSTOM_STRING_IDENTIFIER).evaluateAttributeExpressions(flowFile).getValue();
    final String sourceNullCustomNonStringIdentifier = context.getProperty(SOURCE_NULL_CUSTOM_NON_STRING_IDENTIFIER).evaluateAttributeExpressions(flowFile).getValue();
    final String targetTableName = context.getProperty(TARGET_TABLE_NAME).evaluateAttributeExpressions(flowFile).getValue();
    final Integer clusterMapTasks = context.getProperty(CLUSTER_MAP_TASKS).evaluateAttributeExpressions(flowFile).asInteger();
    final String systemProperties = context.getProperty(SQOOP_SYSTEM_PROPERTIES).evaluateAttributeExpressions(flowFile).getValue();
    final String additionalArguments = context.getProperty(SQOOP_ADDITIONAL_ARGUMENTS).evaluateAttributeExpressions(flowFile).getValue();
    final String hcatalogDatabase = context.getProperty(HCATALOG_DATABASE).evaluateAttributeExpressions(flowFile).getValue();
    final String hcatalogTable = context.getProperty(HCATALOG_TABLE).evaluateAttributeExpressions(flowFile).getValue();
    final String COMMAND_SHELL = "/bin/bash";
    final String COMMAND_SHELL_FLAGS = "-c";
    final StopWatch stopWatch = new StopWatch(false);
    KerberosConfig kerberosConfig = new KerberosConfig().setLogger(logger).setKerberosPrincipal(kerberosPrincipal).setKerberosKeytab(kerberosKeyTab);
    SqoopExportBuilder sqoopExportBuilder = new SqoopExportBuilder();
    String sqoopExportCommand = sqoopExportBuilder.setLogger(logger)
            .setHcatalogDatabase(hcatalogDatabase)
            .setHcatalogTable(hcatalogTable)
            .setSystemProperties(systemProperties)
            .setAdditionalArguments(additionalArguments)
            .setTargetConnectionString(sqoopConnectionService.getConnectionString())
            .setTargetUserName(sqoopConnectionService.getUserName())
            .setPasswordMode(sqoopConnectionService.getPasswordMode())
            .setTargetPasswordHdfsFile(sqoopConnectionService.getPasswordHdfsFile())
            .setTargetPasswordPassphrase(sqoopConnectionService.getPasswordPassphrase())
            .setTargetEnteredPassword(sqoopConnectionService.getEnteredPassword())
            .setTargetConnectionManager(sqoopConnectionService.getConnectionManager())
            .setTargetDriver(sqoopConnectionService.getDriver())
            .setTargetTableName(targetTableName)
            .setSourceHdfsDirectory(sourceHdfsDirectory)
            .setSourceHdfsFileDelimiter(sourceHdfsFileDelimiter)
            .setSourceNullInterpretationStrategy(sourceNullInterpretationStrategy)
            .setSourceNullInterpretationStrategyCustomNullString(sourceNullCustomStringIdentifier)
            .setSourceNullInterpretationStrategyCustomNullNonString(sourceNullCustomNonStringIdentifier)
            .setClusterMapTasks(clusterMapTasks)
            .build();
    List<String> sqoopExportExecutionCommand = new ArrayList<>();
    sqoopExportExecutionCommand.add(COMMAND_SHELL);
    sqoopExportExecutionCommand.add(COMMAND_SHELL_FLAGS);
    sqoopExportExecutionCommand.add(sqoopExportCommand);
    SqoopExportProcessRunner sqoopExportProcessRunner = new SqoopExportProcessRunner(kerberosConfig, sqoopExportExecutionCommand, logger);
    logger.info("Starting execution of Sqoop export command");
    stopWatch.start();
    SqoopProcessResult sqoopExportProcessResult = sqoopExportProcessRunner.execute();
    long jobDurationSeconds = stopWatch.getElapsed(TimeUnit.SECONDS);
    stopWatch.stop();
    logger.info("Finished execution of Sqoop export command");
    int resultExportStatus = sqoopExportProcessResult.getExitValue();
    SqoopUtils sqoopUtils = new SqoopUtils();
    long recordsExportCount = sqoopUtils.getSqoopExportRecordCount(sqoopExportProcessResult, logger);
    String sqoopExportCommandWithCredentialsMasked = sqoopUtils.maskCredentials(sqoopExportCommand, sqoopUtils.getCredentialsToMask());
    flowFile = session.putAttribute(flowFile, "sqoop.export.command.text", sqoopExportCommandWithCredentialsMasked);
    flowFile = session.putAttribute(flowFile, "sqoop.export.result.code", String.valueOf(resultExportStatus));
    flowFile = session.putAttribute(flowFile, "sqoop.export.run.seconds", String.valueOf(jobDurationSeconds));
    flowFile = session.putAttribute(flowFile, "sqoop.export.record.count", String.valueOf(recordsExportCount));
    flowFile = session.putAttribute(flowFile, "sqoop.export.output.table", targetTableName);
    logger.info("Wrote result attributes to flow file");
    if (resultExportStatus == 0) {
        logger.info("Sqoop Export OK [Code {}]", new Object[] { resultExportStatus });
        session.transfer(flowFile, REL_SUCCESS);
    } else {
        logger.info("Sqoop Export FAIL [Code {}]", new Object[] { resultExportStatus });
        session.transfer(flowFile, REL_FAILURE);
    }
}
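Unlike the previous example, ExportSqoop constructs the watch with false and calls start() explicitly, so building the Sqoop command line stays outside the measured window; it also reads getElapsed(TimeUnit.SECONDS) before calling stop(). A minimal sketch of that deferred-start pattern follows; the class name and the Thread.sleep are placeholders standing in for the external process run.

import java.util.concurrent.TimeUnit;

import org.apache.nifi.util.StopWatch;

public class SqoopExportTimingSketch {

    public static void main(String[] args) throws InterruptedException {
        // Passing false defers timing; command construction is not measured.
        final StopWatch stopWatch = new StopWatch(false);

        stopWatch.start();
        Thread.sleep(1000); // stand-in for sqoopExportProcessRunner.execute()

        // Read while the watch is still running, as ExportSqoop does before stop()
        final long jobDurationSeconds = stopWatch.getElapsed(TimeUnit.SECONDS);
        stopWatch.stop();

        System.out.println("sqoop.export.run.seconds=" + jobDurationSeconds);
    }
}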
Use of org.apache.nifi.util.StopWatch in project nifi by apache.
The class AbstractFlowFileServerProtocol, method commitTransferTransaction:
protected int commitTransferTransaction(Peer peer, FlowFileTransaction transaction) throws IOException {
    ProcessSession session = transaction.getSession();
    Set<FlowFile> flowFilesSent = transaction.getFlowFilesSent();
    // we've sent a FINISH_TRANSACTION. Now we'll wait for the peer to send a 'Confirm Transaction' response
    CommunicationsSession commsSession = peer.getCommunicationsSession();
    final Response transactionConfirmationResponse = readTransactionResponse(true, commsSession);
    if (transactionConfirmationResponse.getCode() == ResponseCode.CONFIRM_TRANSACTION) {
        // Confirm Checksum and echo back the confirmation.
        logger.debug("{} Received {} from {}", this, transactionConfirmationResponse, peer);
        final String receivedCRC = transactionConfirmationResponse.getMessage();
        if (getVersionNegotiator().getVersion() > 3) {
            String calculatedCRC = transaction.getCalculatedCRC();
            if (!receivedCRC.equals(calculatedCRC)) {
                writeTransactionResponse(true, ResponseCode.BAD_CHECKSUM, commsSession);
                session.rollback();
                throw new IOException(this + " Sent data to peer " + peer + " but calculated CRC32 Checksum as " + calculatedCRC + " while peer calculated CRC32 Checksum as " + receivedCRC + "; canceling transaction and rolling back session");
            }
        }
        writeTransactionResponse(true, ResponseCode.CONFIRM_TRANSACTION, commsSession, "");
    } else {
        throw new ProtocolException("Expected to receive 'Confirm Transaction' response from peer " + peer + " but received " + transactionConfirmationResponse);
    }
    final String flowFileDescription = flowFilesSent.size() < 20 ? flowFilesSent.toString() : flowFilesSent.size() + " FlowFiles";
    final Response transactionResponse;
    try {
        transactionResponse = readTransactionResponse(true, commsSession);
    } catch (final IOException e) {
        logger.error("{} Failed to receive a response from {} when expecting a TransactionFinished Indicator." + " It is unknown whether or not the peer successfully received/processed the data." + " Therefore, {} will be rolled back, possibly resulting in data duplication of {}", this, peer, session, flowFileDescription);
        session.rollback();
        throw e;
    }
    logger.debug("{} received {} from {}", new Object[] { this, transactionResponse, peer });
    if (transactionResponse.getCode() == ResponseCode.TRANSACTION_FINISHED_BUT_DESTINATION_FULL) {
        peer.penalize(port.getIdentifier(), port.getYieldPeriod(TimeUnit.MILLISECONDS));
    } else if (transactionResponse.getCode() != ResponseCode.TRANSACTION_FINISHED) {
        throw new ProtocolException("After sending data, expected TRANSACTION_FINISHED response but got " + transactionResponse);
    }
    session.commit();
    StopWatch stopWatch = transaction.getStopWatch();
    long bytesSent = transaction.getBytesSent();
    stopWatch.stop();
    final String uploadDataRate = stopWatch.calculateDataRate(bytesSent);
    final long uploadMillis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
    final String dataSize = FormatUtils.formatDataSize(bytesSent);
    logger.info("{} Successfully sent {} ({}) to {} in {} milliseconds at a rate of {}", new Object[] { this, flowFileDescription, dataSize, peer, uploadMillis, uploadDataRate });
    return flowFilesSent.size();
}
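Here the StopWatch is created and started elsewhere and arrives via transaction.getStopWatch(); after session.commit() the protocol stops it and derives a human-readable throughput with calculateDataRate plus the total duration in milliseconds. A minimal sketch of that reporting step is below; the byte count and the Thread.sleep stand in for an actual transfer, and it assumes FormatUtils resolves to org.apache.nifi.util.FormatUtils from nifi-utils.

import java.util.concurrent.TimeUnit;

import org.apache.nifi.util.FormatUtils;
import org.apache.nifi.util.StopWatch;

public class TransferRateSketch {

    public static void main(String[] args) throws InterruptedException {
        final StopWatch stopWatch = new StopWatch(true); // transaction.getStopWatch() in the protocol
        final long bytesSent = 8L * 1024 * 1024;         // transaction.getBytesSent() in the protocol

        Thread.sleep(500); // stand-in for streaming FlowFile content to the peer

        stopWatch.stop();
        final String uploadDataRate = stopWatch.calculateDataRate(bytesSent);
        final long uploadMillis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
        final String dataSize = FormatUtils.formatDataSize(bytesSent);
        System.out.println("Sent " + dataSize + " in " + uploadMillis + " ms at a rate of " + uploadDataRate);
    }
}

The same stop/calculateDataRate/getDuration sequence appears in commitReceiveTransaction below, applied to the bytes received.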
Use of org.apache.nifi.util.StopWatch in project nifi by apache.
The class AbstractFlowFileServerProtocol, method commitReceiveTransaction:
protected int commitReceiveTransaction(Peer peer, FlowFileTransaction transaction) throws IOException {
    CommunicationsSession commsSession = peer.getCommunicationsSession();
    ProcessSession session = transaction.getSession();
    final Response confirmTransactionResponse = readTransactionResponse(false, commsSession);
    logger.debug("{} Received {} from {}", this, confirmTransactionResponse, peer);
    switch (confirmTransactionResponse.getCode()) {
        case CONFIRM_TRANSACTION:
            break;
        case BAD_CHECKSUM:
            session.rollback();
            throw new IOException(this + " Received a BadChecksum response from peer " + peer);
        default:
            throw new ProtocolException(this + " Received unexpected Response Code from peer " + peer + " : " + confirmTransactionResponse + "; expected 'Confirm Transaction' Response Code");
    }
    // Commit the session so that we have persisted the data
    session.commit();
    if (transaction.getContext().getAvailableRelationships().isEmpty()) {
        // Confirm that we received the data and the peer can now discard it but that the peer should not
        // send any more data for a bit
        logger.debug("{} Sending TRANSACTION_FINISHED_BUT_DESTINATION_FULL to {}", this, peer);
        writeTransactionResponse(false, ResponseCode.TRANSACTION_FINISHED_BUT_DESTINATION_FULL, commsSession);
    } else {
        // Confirm that we received the data and the peer can now discard it
        logger.debug("{} Sending TRANSACTION_FINISHED to {}", this, peer);
        writeTransactionResponse(false, ResponseCode.TRANSACTION_FINISHED, commsSession);
    }
    Set<FlowFile> flowFilesReceived = transaction.getFlowFilesSent();
    long bytesReceived = transaction.getBytesSent();
    StopWatch stopWatch = transaction.getStopWatch();
    stopWatch.stop();
    final String flowFileDescription = flowFilesReceived.size() < 20 ? flowFilesReceived.toString() : flowFilesReceived.size() + " FlowFiles";
    final String uploadDataRate = stopWatch.calculateDataRate(bytesReceived);
    final long uploadMillis = stopWatch.getDuration(TimeUnit.MILLISECONDS);
    final String dataSize = FormatUtils.formatDataSize(bytesReceived);
    logger.info("{} Successfully received {} ({}) from {} in {} milliseconds at a rate of {}", new Object[] { this, flowFileDescription, dataSize, peer, uploadMillis, uploadDataRate });
    return flowFilesReceived.size();
}
Use of org.apache.nifi.util.StopWatch in project nifi by apache.
The class ExtractCCDAAttributes, method onTrigger:
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) {
    // stores CDA attributes
    Map<String, String> attributes = new TreeMap<String, String>();
    getLogger().info("Processing CCDA");
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    if (processMap.isEmpty()) {
        getLogger().error("Process Mapping is not loaded");
        session.transfer(flowFile, REL_FAILURE);
        return;
    }
    final Boolean skipValidation = context.getProperty(SKIP_VALIDATION).asBoolean();
    final StopWatch stopWatch = new StopWatch(true);
    ClinicalDocument cd = null;
    try {
        // Load and optionally validate CDA document
        cd = loadDocument(session.read(flowFile), skipValidation);
    } catch (ProcessException e) {
        session.transfer(flowFile, REL_FAILURE);
        return;
    }
    getLogger().debug("Loaded document for {} in {}", new Object[] { flowFile, stopWatch.getElapsed(TimeUnit.MILLISECONDS) });
    getLogger().debug("Processing elements");
    // Process CDA element using mapping data
    processElement(null, cd, attributes);
    flowFile = session.putAllAttributes(flowFile, attributes);
    stopWatch.stop();
    getLogger().debug("Successfully processed {} in {}", new Object[] { flowFile, stopWatch.getDuration(TimeUnit.MILLISECONDS) });
    if (getLogger().isDebugEnabled()) {
        for (Entry<String, String> entry : attributes.entrySet()) {
            getLogger().debug("Attribute: {}={}", new Object[] { entry.getKey(), entry.getValue() });
        }
    }
    session.transfer(flowFile, REL_SUCCESS);
}
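This processor takes two readings from a single watch: getElapsed while it is still running, to log how long loading the document alone took, and getDuration after stop(), to log the total processing time. A minimal sketch of those two readings follows; the class name and the Thread.sleep calls are placeholders for the load and processing steps.

import java.util.concurrent.TimeUnit;

import org.apache.nifi.util.StopWatch;

public class CcdaTimingSketch {

    public static void main(String[] args) throws InterruptedException {
        final StopWatch stopWatch = new StopWatch(true);

        Thread.sleep(100); // stand-in for loadDocument(...)
        // Intermediate reading while the watch is still running, mirroring the "Loaded document" debug log
        System.out.println("Loaded document in " + stopWatch.getElapsed(TimeUnit.MILLISECONDS) + " ms");

        Thread.sleep(100); // stand-in for processElement(...)
        stopWatch.stop();
        System.out.println("Successfully processed in " + stopWatch.getDuration(TimeUnit.MILLISECONDS) + " ms");
    }
}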