Use of org.apache.commons.lang3.StringUtils.isBlank in project kylo by Teradata.
The class AbstractMergeTable, method onTrigger.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final ComponentLog logger = getLog();
    FlowFile flowFile = session.get();
    if (flowFile == null) {
        return;
    }
    final String blockingValue = context.getProperty(BLOCKING_KEY).evaluateAttributeExpressions(flowFile).getValue();
    String flowFileId = flowFile.getAttribute(CoreAttributes.UUID.key());
    boolean block = false;
    if (blocking && blockingCache.putIfAbsent(blockingValue, flowFileId) != null) {
        if (StringUtils.isBlank(flowFile.getAttribute(BLOCKED_START_TIME))) {
            flowFile = session.putAttribute(flowFile, BLOCKED_START_TIME, String.valueOf(System.currentTimeMillis()));
            getLogger().info("Transferring Flow file {} to blocked relationship", new Object[] { flowFile });
        }
        // penalize the flow file and transfer to BLOCKED
        flowFile = session.penalize(flowFile);
        session.transfer(flowFile, REL_BLOCKED);
        return;
    }
    // Add the blocked time to the flow file if it was previously blocked.
    if (blocking && StringUtils.isNotBlank(flowFile.getAttribute(BLOCKED_START_TIME))) {
        String blockedStartTime = flowFile.getAttribute(BLOCKED_START_TIME);
        try {
            Long l = Long.parseLong(blockedStartTime);
            Long blockTime = System.currentTimeMillis() - l;
            getLogger().info("Processing Blocked flow file {}. This was blocked for {} ms", new Object[] { flowFile, blockTime });
            flowFile = session.putAttribute(flowFile, BLOCKED_TIME, String.valueOf(blockTime) + " ms");
        } catch (NumberFormatException e) {
            // ignore an unparseable start time; the flow file simply proceeds without a BLOCKED_TIME attribute
        }
    }
    String PROVENANCE_EXECUTION_STATUS_KEY = context.getName() + " Execution Status";
    String partitionSpecString = context.getProperty(PARTITION_SPECIFICATION).evaluateAttributeExpressions(flowFile).getValue();
    String sourceSchema = context.getProperty(SOURCE_SCHEMA).evaluateAttributeExpressions(flowFile).getValue();
    String sourceTable = context.getProperty(SOURCE_TABLE).evaluateAttributeExpressions(flowFile).getValue();
    String targetSchema = context.getProperty(TARGET_SCHEMA).evaluateAttributeExpressions(flowFile).getValue();
    String targetTable = context.getProperty(TARGET_TABLE).evaluateAttributeExpressions(flowFile).getValue();
    String feedPartitionValue = context.getProperty(FEED_PARTITION).evaluateAttributeExpressions(flowFile).getValue();
    String mergeStrategyValue = context.getProperty(MERGE_STRATEGY).evaluateAttributeExpressions(flowFile).getValue();
    String hiveConfigurations = context.getProperty(HIVE_CONFIGURATIONS).evaluateAttributeExpressions(flowFile).getValue();
    boolean resetHive = context.getProperty(RESET_HIVE).asBoolean();
    final ColumnSpec[] columnSpecs = Optional.ofNullable(context.getProperty(FIELD_SPECIFICATION).evaluateAttributeExpressions(flowFile).getValue())
        .filter(StringUtils::isNotEmpty)
        .map(ColumnSpec::createFromString)
        .orElse(new ColumnSpec[0]);
    if (STRATEGY_PK_MERGE.equals(mergeStrategyValue) && (columnSpecs == null || columnSpecs.length == 0)) {
        getLog().error("Missing required field specification for PK merge feature");
        flowFile = session.putAttribute(flowFile, PROVENANCE_EXECUTION_STATUS_KEY, "Failed: Missing required field specification for PK merge feature");
        release(blockingValue);
        session.transfer(flowFile, IngestProperties.REL_FAILURE);
        return;
    }
    // Maintain default for backward compatibility
    if (StringUtils.isEmpty(mergeStrategyValue)) {
        mergeStrategyValue = STRATEGY_DEDUPE_MERGE;
    }
    logger.info("Merge strategy: " + mergeStrategyValue + " Using Source: " + sourceTable + " Target: " + targetTable + " feed partition:" + feedPartitionValue + " partSpec: " + partitionSpecString);
    final StopWatch stopWatch = new StopWatch(true);
    try (final Connection conn = getConnection(context)) {
        TableMergeSyncSupport mergeSupport = new TableMergeSyncSupport(conn);
        if (resetHive) {
            mergeSupport.resetHiveConf();
        }
        mergeSupport.enableDynamicPartitions();
        if (StringUtils.isNotEmpty(hiveConfigurations)) {
            mergeSupport.setHiveConf(hiveConfigurations.split("\\|"));
        }
        PartitionSpec partitionSpec = new PartitionSpec(partitionSpecString);
        if (STRATEGY_DEDUPE_MERGE.equals(mergeStrategyValue)) {
            mergeSupport.doMerge(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue, true);
        } else if (STRATEGY_MERGE.equals(mergeStrategyValue)) {
            mergeSupport.doMerge(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue, false);
        } else if (STRATEGY_SYNC.equals(mergeStrategyValue)) {
            mergeSupport.doSync(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue);
        } else if (STRATEGY_ROLLING_SYNC.equals(mergeStrategyValue)) {
            mergeSupport.doRollingSync(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue);
        } else if (STRATEGY_PK_MERGE.equals(mergeStrategyValue)) {
            mergeSupport.doPKMerge(sourceSchema, sourceTable, targetSchema, targetTable, partitionSpec, feedPartitionValue, columnSpecs);
        } else {
            throw new UnsupportedOperationException("Failed to resolve the merge strategy");
        }
        stopWatch.stop();
        session.getProvenanceReporter().modifyContent(flowFile, "Execution completed", stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        flowFile = session.putAttribute(flowFile, PROVENANCE_EXECUTION_STATUS_KEY, "Successful");
        release(blockingValue);
        session.transfer(flowFile, REL_SUCCESS);
    } catch (final Exception e) {
        logger.error("Unable to execute merge doMerge for {} due to {}; routing to failure", new Object[] { flowFile, e }, e);
        flowFile = session.putAttribute(flowFile, PROVENANCE_EXECUTION_STATUS_KEY, "Failed: " + e.getMessage());
        release(blockingValue);
        session.transfer(flowFile, REL_FAILURE);
    }
}
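The blocked-relationship branch above relies on StringUtils.isBlank to set BLOCKED_START_TIME only once: isBlank returns true for null, empty, and whitespace-only strings, so a flow file entering the blocked path for the first time gets a timestamp, while one re-entering keeps its original value. A minimal sketch of that guard, using a plain Map in place of the FlowFile attributes (the attribute name and map are illustrative stand-ins, not kylo's API):

import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang3.StringUtils;

public class BlockedStartTimeDemo {
    private static final String BLOCKED_START_TIME = "blocked.start.time"; // illustrative attribute name

    public static void main(String[] args) {
        Map<String, String> attributes = new HashMap<>();

        // First pass: no start time yet, so isBlank(null) is true and we record one.
        if (StringUtils.isBlank(attributes.get(BLOCKED_START_TIME))) {
            attributes.put(BLOCKED_START_TIME, String.valueOf(System.currentTimeMillis()));
        }
        String first = attributes.get(BLOCKED_START_TIME);

        // Second pass: the value is present and non-blank, so it is left untouched.
        if (StringUtils.isBlank(attributes.get(BLOCKED_START_TIME))) {
            attributes.put(BLOCKED_START_TIME, "should never happen");
        }
        System.out.println(first.equals(attributes.get(BLOCKED_START_TIME))); // true
    }
}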
Use of org.apache.commons.lang3.StringUtils.isBlank in project kylo by Teradata.
The class ConfigurationPropertyReplacer, method replaceControllerServiceProperties.
/**
 * Replaces the map of properties in the DTO but does not persist the change back to NiFi. Call the REST client to persist the change.
 *
 * @param controllerServiceDTO the controller service
 * @param properties the properties to set
 * @param propertyDescriptorTransform the property descriptor transformer
 * @return {@code true} if any properties were updated, {@code false} otherwise
 */
public static boolean replaceControllerServiceProperties(ControllerServiceDTO controllerServiceDTO, Map<String, String> properties, NiFiPropertyDescriptorTransform propertyDescriptorTransform) {
    Set<String> changedProperties = new HashSet<>();
    if (controllerServiceDTO != null) {
        // check both NiFi's internal key name and the display name when matching the properties
        CaseInsensitiveMap propertyMap = new CaseInsensitiveMap(properties);
        Map<String, String> controllerServiceProperties = controllerServiceDTO.getProperties();
        controllerServiceProperties.entrySet().stream()
            .filter(entry -> (propertyMap.containsKey(entry.getKey())
                              || (controllerServiceDTO.getDescriptors().get(entry.getKey()) != null
                                  && propertyMap.containsKey(controllerServiceDTO.getDescriptors().get(entry.getKey()).getDisplayName().toLowerCase()))))
            .forEach(entry -> {
                boolean isSensitive = propertyDescriptorTransform.isSensitive(controllerServiceDTO.getDescriptors().get(entry.getKey()));
                String value = (String) propertyMap.get(entry.getKey());
                if (StringUtils.isBlank(value)) {
                    value = (String) propertyMap.get(controllerServiceDTO.getDescriptors().get(entry.getKey()).getDisplayName().toLowerCase());
                }
                if (!isSensitive || (isSensitive && StringUtils.isNotBlank(value))) {
                    entry.setValue(value);
                    changedProperties.add(entry.getKey());
                }
            });
    }
    return !changedProperties.isEmpty();
}
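The lookup here tries the internal key first and, if isBlank reports no usable value, falls back to the lower-cased display name against a case-insensitive map. A minimal sketch of that two-step lookup, assuming the generic CaseInsensitiveMap from commons-collections4 (the snippet above uses the older raw variant) and hypothetical key names:

import java.util.HashMap;
import java.util.Map;
import org.apache.commons.collections4.map.CaseInsensitiveMap;
import org.apache.commons.lang3.StringUtils;

public class PropertyLookupDemo {
    public static void main(String[] args) {
        Map<String, String> supplied = new HashMap<>();
        supplied.put("Database Connection URL", "jdbc:mysql://localhost/db"); // keyed by display name

        // CaseInsensitiveMap normalizes keys, so lookups succeed regardless of case.
        Map<String, String> propertyMap = new CaseInsensitiveMap<>(supplied);

        String internalKey = "database-connection-url"; // hypothetical internal key name
        String displayName = "Database Connection URL";

        String value = propertyMap.get(internalKey); // null: only the display name was supplied
        if (StringUtils.isBlank(value)) {
            // fall back to the display name, mirroring the snippet above
            value = propertyMap.get(displayName.toLowerCase());
        }
        System.out.println(value); // jdbc:mysql://localhost/db
    }
}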
Use of org.apache.commons.lang3.StringUtils.isBlank in project nifi by Apache.
The class Notify, method onTrigger.
@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    final ComponentLog logger = getLogger();
    final PropertyValue signalIdProperty = context.getProperty(RELEASE_SIGNAL_IDENTIFIER);
    final PropertyValue counterNameProperty = context.getProperty(SIGNAL_COUNTER_NAME);
    final PropertyValue deltaProperty = context.getProperty(SIGNAL_COUNTER_DELTA);
    final String attributeCacheRegex = context.getProperty(ATTRIBUTE_CACHE_REGEX).getValue();
    final Integer bufferCount = context.getProperty(SIGNAL_BUFFER_COUNT).asInteger();
    // the cache client used to interact with the distributed cache
    final AtomicDistributedMapCacheClient cache = context.getProperty(DISTRIBUTED_CACHE_SERVICE).asControllerService(AtomicDistributedMapCacheClient.class);
    final WaitNotifyProtocol protocol = new WaitNotifyProtocol(cache);
    final Map<String, SignalBuffer> signalBuffers = new HashMap<>();
    for (int i = 0; i < bufferCount; i++) {
        final FlowFile flowFile = session.get();
        if (flowFile == null) {
            break;
        }
        // The signal id is computed from the 'RELEASE_SIGNAL_IDENTIFIER' property with expression language support.
        final String signalId = signalIdProperty.evaluateAttributeExpressions(flowFile).getValue();
        // If the computed value is null or empty, transfer the flow file to the failure relationship.
        if (StringUtils.isBlank(signalId)) {
            logger.error("FlowFile {} has no attribute for given Release Signal Identifier", new Object[] { flowFile });
            // set 'notified' attribute
            session.transfer(session.putAttribute(flowFile, NOTIFIED_ATTRIBUTE_NAME, String.valueOf(false)), REL_FAILURE);
            continue;
        }
        String counterName = counterNameProperty.evaluateAttributeExpressions(flowFile).getValue();
        if (StringUtils.isEmpty(counterName)) {
            counterName = WaitNotifyProtocol.DEFAULT_COUNT_NAME;
        }
        int delta = 1;
        if (deltaProperty.isSet()) {
            final String deltaStr = deltaProperty.evaluateAttributeExpressions(flowFile).getValue();
            try {
                delta = Integer.parseInt(deltaStr);
            } catch (final NumberFormatException e) {
                logger.error("Failed to calculate delta for FlowFile {} due to {}", new Object[] { flowFile, e }, e);
                session.transfer(session.putAttribute(flowFile, NOTIFIED_ATTRIBUTE_NAME, String.valueOf(false)), REL_FAILURE);
                continue;
            }
        }
        if (!signalBuffers.containsKey(signalId)) {
            signalBuffers.put(signalId, new SignalBuffer());
        }
        final SignalBuffer signalBuffer = signalBuffers.get(signalId);
        if (StringUtils.isNotEmpty(attributeCacheRegex)) {
            flowFile.getAttributes().entrySet().stream()
                .filter(e -> (!e.getKey().equals("uuid") && e.getKey().matches(attributeCacheRegex)))
                .forEach(e -> signalBuffer.attributesToCache.put(e.getKey(), e.getValue()));
        }
        signalBuffer.incrementDelta(counterName, delta);
        signalBuffer.flowFiles.add(flowFile);
        if (logger.isDebugEnabled()) {
            logger.debug("Cached release signal identifier {} counterName {} from FlowFile {}", new Object[] { signalId, counterName, flowFile });
        }
    }
    signalBuffers.forEach((signalId, signalBuffer) -> {
        // In case of an exception, just throw it so that the processor can retry after yielding for a while.
        try {
            protocol.notify(signalId, signalBuffer.deltas, signalBuffer.attributesToCache);
            signalBuffer.flowFiles.forEach(flowFile -> session.transfer(session.putAttribute(flowFile, NOTIFIED_ATTRIBUTE_NAME, String.valueOf(true)), REL_SUCCESS));
        } catch (IOException e) {
            throw new RuntimeException(String.format("Unable to communicate with cache when processing %s due to %s", signalId, e), e);
        }
    });
}
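Notify deliberately mixes the two commons-lang3 checks: isBlank guards the required signal id (whitespace is as useless as a missing value, so the flow file is failed), while isEmpty guards the optional counter name (whitespace could in principle be a deliberate name, and a missing value just falls back to the default). A small sketch of the distinction; the "default" literal and "route to failure" print stand in for WaitNotifyProtocol.DEFAULT_COUNT_NAME and the REL_FAILURE transfer:

import org.apache.commons.lang3.StringUtils;

public class BlankVsEmptyDemo {
    public static void main(String[] args) {
        // isEmpty only rejects null and "", while isBlank also rejects whitespace.
        System.out.println(StringUtils.isEmpty("   ")); // false
        System.out.println(StringUtils.isBlank("   ")); // true

        // Required value: a whitespace-only signal id is treated as missing.
        String signalId = "   ";
        if (StringUtils.isBlank(signalId)) {
            System.out.println("route to failure");
        }

        // Optional value with a default, mirroring the counter-name handling above.
        String counterName = "";
        if (StringUtils.isEmpty(counterName)) {
            counterName = "default"; // stand-in for WaitNotifyProtocol.DEFAULT_COUNT_NAME
        }
        System.out.println(counterName);
    }
}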
Use of org.apache.commons.lang3.StringUtils.isBlank in project nifi by Apache.
The class PGList, method doExecute.
@Override
public ProcessGroupsResult doExecute(final NiFiClient client, final Properties properties) throws NiFiClientException, IOException {
    final FlowClient flowClient = client.getFlowClient();
    // get the optional id of the parent PG, otherwise fall back to the root group
    String parentPgId = getArg(properties, CommandOption.PG_ID);
    if (StringUtils.isBlank(parentPgId)) {
        parentPgId = flowClient.getRootGroupId();
    }
    final ProcessGroupFlowEntity processGroupFlowEntity = flowClient.getProcessGroup(parentPgId);
    final ProcessGroupFlowDTO processGroupFlowDTO = processGroupFlowEntity.getProcessGroupFlow();
    final FlowDTO flowDTO = processGroupFlowDTO.getFlow();
    final List<ProcessGroupDTO> processGroups = new ArrayList<>();
    if (flowDTO.getProcessGroups() != null) {
        flowDTO.getProcessGroups().stream().map(pge -> pge.getComponent()).forEach(dto -> processGroups.add(dto));
    }
    return new ProcessGroupsResult(getResultType(properties), processGroups);
}
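The blank check followed by a fallback assignment is common enough that commons-lang3 ships StringUtils.defaultIfBlank for it. A sketch of the one-liner equivalent; note that, unlike the if statement above, the second argument is evaluated eagerly, which matters when the default is itself a remote call such as flowClient.getRootGroupId(), so the explicit check in PGList is a reasonable design choice:

import org.apache.commons.lang3.StringUtils;

public class DefaultIfBlankDemo {
    public static void main(String[] args) {
        System.out.println(StringUtils.defaultIfBlank(null, "root"));   // root
        System.out.println(StringUtils.defaultIfBlank("  ", "root"));   // root
        System.out.println(StringUtils.defaultIfBlank("pg-1", "root")); // pg-1
    }
}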
Use of org.apache.commons.lang3.StringUtils.isBlank in project ddf by Codice.
The class Historian, method version.
/**
* Versions updated {@link Metacard}s and {@link ContentItem}s.
*
* @param streamUpdateRequest Needed to pass {@link ddf.catalog.core.versioning.MetacardVersion#SKIP_VERSIONING}
* flag into downstream update
* @param updateStorageResponse Versions this response's updated items
* @return the update response originally passed in
* @throws UnsupportedQueryException
* @throws SourceUnavailableException
* @throws IngestException
*/
public UpdateStorageResponse version(UpdateStorageRequest streamUpdateRequest, UpdateStorageResponse updateStorageResponse, UpdateResponse updateResponse)
        throws UnsupportedQueryException, SourceUnavailableException, IngestException {
    if (doSkip(updateStorageResponse)) {
        return updateStorageResponse;
    }
    setSkipFlag(streamUpdateRequest);
    setSkipFlag(updateStorageResponse);
    List<Metacard> updatedMetacards = updateStorageResponse.getUpdatedContentItems().stream()
        .filter(ci -> StringUtils.isBlank(ci.getQualifier()))
        .map(ContentItem::getMetacard)
        .filter(Objects::nonNull)
        .filter(isNotVersionNorDeleted)
        .collect(Collectors.toList());
    Map<String, Metacard> originalMetacards = query(forIds(updatedMetacards.stream().map(Metacard::getId).collect(Collectors.toList())));
    Collection<ReadStorageRequest> ids = getReadStorageRequests(updatedMetacards);
    Map<String, List<ContentItem>> content = getContent(ids);
    Function<String, Action> getAction = (id) -> content.containsKey(id) ? Action.VERSIONED_CONTENT : Action.VERSIONED;
    Map<String, Metacard> versionMetacards = getVersionMetacards(originalMetacards.values(), getAction, (Subject) updateResponse.getProperties().get(SecurityConstants.SECURITY_SUBJECT));
    CreateStorageResponse createStorageResponse = versionContentItems(content, versionMetacards);
    if (createStorageResponse == null) {
        LOGGER.debug("Could not version content items.");
        return updateStorageResponse;
    }
    setResourceUriForContent(/* mutable */ versionMetacards, createStorageResponse);
    storeVersionMetacards(versionMetacards);
    return updateStorageResponse;
}
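Here isBlank serves as a stream predicate: content items with a null, empty, or whitespace qualifier are the "main" items to version, while qualified ones (derived content such as thumbnails) are skipped. A minimal sketch of that filtering, with the qualifier values as illustrative examples rather than ddf data:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;

public class BlankQualifierFilterDemo {
    public static void main(String[] args) {
        // Only the blank qualifiers (null, empty, whitespace) survive the filter.
        List<String> qualifiers = Arrays.asList(null, "", "  ", "thumbnail", "overview");
        List<String> mainItems = qualifiers.stream()
            .filter(StringUtils::isBlank)
            .collect(Collectors.toList());
        System.out.println(mainItems.size()); // 3
    }
}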