use of org.wso2.charon3.core.utils.codeutils.Node in project wso2-synapse by wso2.
the class VFSTransportListener method scanFileOrDirectory.
/**
* Search for files that match the given regex pattern and create a list.
* Then process each of these files and update the status of the scan on
* the poll table.
* @param entry the poll table entry for the scan
* @param fileURI the file or directory to be scanned
*/
private void scanFileOrDirectory(final PollTableEntry entry, String fileURI) {
if (log.isDebugEnabled()) {
log.debug("Polling: " + VFSUtils.maskURLPassword(fileURI));
}
if (entry.isClusterAware()) {
boolean leader = true;
ClusteringAgent agent = getConfigurationContext().getAxisConfiguration().getClusteringAgent();
if (agent != null && agent.getParameter("domain") != null) {
// hazelcast clustering instance name
String hazelcastInstanceName = agent.getParameter("domain").getValue() + ".instance";
HazelcastInstance instance = Hazelcast.getHazelcastInstanceByName(hazelcastInstanceName);
if (instance != null) {
// dirty leader election
leader = instance.getCluster().getMembers().iterator().next().localMember();
} else {
log.warn("Clustering error, running the polling task in this node");
}
} else {
log.warn("Although proxy is cluster aware, clustering config are not present, hence running the" + " the polling task in this node");
}
if (!leader) {
if (log.isDebugEnabled()) {
log.debug("This Member is not the leader");
}
entry.setLastPollState(PollTableEntry.NONE);
long now = System.currentTimeMillis();
entry.setLastPollTime(now);
entry.setNextPollTime(now + entry.getPollInterval());
onPollCompletion(entry);
return;
}
if (log.isDebugEnabled()) {
log.debug("This Member is the leader");
}
}
FileSystemOptions fso = null;
setFileSystemClosed(false);
try {
fso = VFSUtils.attachFileSystemOptions(entry.getVfsSchemeProperties(), fsManager);
} catch (Exception e) {
log.error("Error while attaching VFS file system properties. " + e.getMessage());
}
FileObject fileObject = null;
// TODO : Trying to make the correct URL out of the malformed one.
if (fileURI.contains("vfs:")) {
fileURI = fileURI.substring(fileURI.indexOf("vfs:") + 4);
}
if (log.isDebugEnabled()) {
log.debug("Scanning directory or file : " + VFSUtils.maskURLPassword(fileURI));
}
boolean wasError = true;
int retryCount = 0;
int maxRetryCount = entry.getMaxRetryCount();
long reconnectionTimeout = entry.getReconnectTimeout();
while (wasError) {
try {
retryCount++;
fileObject = fsManager.resolveFile(fileURI, fso);
if (fileObject == null) {
log.error("fileObject is null");
throw new FileSystemException("fileObject is null");
}
wasError = false;
} catch (FileSystemException e) {
if (retryCount >= maxRetryCount) {
processFailure("Repeatedly failed to resolve the file URI: " + VFSUtils.maskURLPassword(fileURI), e, entry);
closeFileSystem(fileObject);
return;
} else {
log.warn("Failed to resolve the file URI: " + VFSUtils.maskURLPassword(fileURI) + ", in attempt " + retryCount + ", " + e.getMessage() + " Retrying in " + reconnectionTimeout + " milliseconds.");
}
}
if (wasError) {
try {
Thread.sleep(reconnectionTimeout);
} catch (InterruptedException e2) {
log.error("Thread was interrupted while waiting to reconnect.", e2);
}
}
}
try {
if (fileObject.exists() && fileObject.isReadable()) {
entry.setLastPollState(PollTableEntry.NONE);
FileObject[] children = null;
try {
children = fileObject.getChildren();
} catch (FileNotFolderException ignored) {
} catch (FileSystemException ex) {
log.error(ex.getMessage(), ex);
}
// if this is a file that would translate to a single message
if (children == null || children.length == 0) {
boolean isFailedRecord = false;
if (entry.getMoveAfterMoveFailure() != null) {
isFailedRecord = isFailedRecord(fileObject, entry);
}
if (fileObject.getType() == FileType.FILE && !isFailedRecord) {
boolean runPostProcess = true;
if (!entry.isFileLockingEnabled() || (entry.isFileLockingEnabled() && acquireLock(fsManager, fileObject, entry, fso, true))) {
try {
if (fileObject.getType() == FileType.FILE) {
processFile(entry, fileObject);
entry.setLastPollState(PollTableEntry.SUCCSESSFUL);
metrics.incrementMessagesReceived();
} else {
runPostProcess = false;
}
} catch (AxisFault e) {
if (e.getCause() instanceof FileNotFoundException) {
log.warn("Error processing File URI : " + VFSUtils.maskURLPassword(fileObject.getName().toString()) + ". This can be due to file moved from another process.");
runPostProcess = false;
} else {
logException("Error processing File URI : " + VFSUtils.maskURLPassword(fileObject.getName().getURI()), e);
entry.setLastPollState(PollTableEntry.FAILED);
metrics.incrementFaultsReceiving();
}
}
if (runPostProcess) {
try {
moveOrDeleteAfterProcessing(entry, fileObject, fso);
} catch (AxisFault axisFault) {
logException("File object '" + VFSUtils.maskURLPassword(fileObject.getURL().toString()) + "' " + "cloud not be moved", axisFault);
entry.setLastPollState(PollTableEntry.FAILED);
String timeStamp = VFSUtils.getSystemTime(entry.getFailedRecordTimestampFormat());
addFailedRecord(entry, fileObject, timeStamp);
}
}
if (entry.isFileLockingEnabled()) {
VFSUtils.releaseLock(fsManager, fileObject, fso);
if (log.isDebugEnabled()) {
log.debug("Removed the lock file '" + VFSUtils.maskURLPassword(fileObject.toString()) + ".lock' of the file '" + VFSUtils.maskURLPassword(fileObject.toString()));
}
}
} else if (log.isDebugEnabled()) {
log.debug("Couldn't get the lock for processing the file : " + VFSUtils.maskURLPassword(fileObject.getName().getURI()));
} else if (isFailedRecord) {
if (entry.isFileLockingEnabled()) {
VFSUtils.releaseLock(fsManager, fileObject, fso);
}
// schedule a cleanup task if the file is there
if (fsManager.resolveFile(fileObject.getURL().toString(), fso) != null && removeTaskState == STATE_STOPPED && entry.getMoveAfterMoveFailure() != null) {
workerPool.execute(new FileRemoveTask(entry, fileObject, fso));
}
if (log.isDebugEnabled()) {
log.debug("File '" + VFSUtils.maskURLPassword(fileObject.getURL().toString()) + "' has been marked as a failed" + " record, it will not process");
}
}
}
} else {
int failCount = 0;
int successCount = 0;
int processCount = 0;
Integer iFileProcessingInterval = entry.getFileProcessingInterval();
Integer iFileProcessingCount = entry.getFileProcessingCount();
if (log.isDebugEnabled()) {
log.debug("File name pattern : " + entry.getFileNamePattern());
}
// Sort the files
String strSortParam = entry.getFileSortParam();
if (strSortParam != null) {
log.debug("Start Sorting the files.");
boolean bSortOrderAsscending = entry.isFileSortAscending();
if (log.isDebugEnabled()) {
log.debug("Sorting the files by : " + strSortParam + ". (" + bSortOrderAsscending + ")");
}
if (strSortParam.equals(VFSConstants.FILE_SORT_VALUE_NAME) && bSortOrderAsscending) {
Arrays.sort(children, new FileNameAscComparator());
} else if (strSortParam.equals(VFSConstants.FILE_SORT_VALUE_NAME) && !bSortOrderAsscending) {
Arrays.sort(children, new FileNameDesComparator());
} else if (strSortParam.equals(VFSConstants.FILE_SORT_VALUE_SIZE) && bSortOrderAsscending) {
Arrays.sort(children, new FileSizeAscComparator());
} else if (strSortParam.equals(VFSConstants.FILE_SORT_VALUE_SIZE) && !bSortOrderAsscending) {
Arrays.sort(children, new FileSizeDesComparator());
} else if (strSortParam.equals(VFSConstants.FILE_SORT_VALUE_LASTMODIFIEDTIMESTAMP) && bSortOrderAsscending) {
Arrays.sort(children, new FileLastmodifiedtimestampAscComparator());
} else if (strSortParam.equals(VFSConstants.FILE_SORT_VALUE_LASTMODIFIEDTIMESTAMP) && !bSortOrderAsscending) {
Arrays.sort(children, new FileLastmodifiedtimestampDesComparator());
}
log.debug("End Sorting the files.");
}
for (FileObject child : children) {
// Stop processing when the service gets undeployed
if (state != BaseConstants.STARTED || !entry.getService().isActive()) {
return;
}
/**
* Before starting to process another file, see whether the proxy is stopped or not.
*/
if (entry.isCanceled()) {
break;
}
// skipping *.lock file
if (child.getName().getBaseName().endsWith(".lock")) {
continue;
}
// skipping subfolders
if (child.getType() != FileType.FILE) {
continue;
}
// skipping files depending on size limitation
if (entry.getFileSizeLimit() >= 0 && child.getContent().getSize() > entry.getFileSizeLimit()) {
if (log.isDebugEnabled()) {
log.debug("Ignoring file - " + child.getName().getBaseName() + " size - " + child.getContent().getSize() + " since it exceeds file size limit - " + entry.getFileSizeLimit());
}
continue;
}
boolean isFailedRecord = false;
if (entry.getMoveAfterMoveFailure() != null) {
isFailedRecord = isFailedRecord(child, entry);
}
if (entry.getFileNamePattern() != null && child.getName().getBaseName().matches(entry.getFileNamePattern())) {
// now we try to get the lock and process
if (log.isDebugEnabled()) {
log.debug("Matching file : " + child.getName().getBaseName());
}
boolean runPostProcess = true;
if ((!entry.isFileLockingEnabled() || (entry.isFileLockingEnabled() && VFSUtils.acquireLock(fsManager, child, fso, true))) && !isFailedRecord) {
// process the file
try {
if (log.isDebugEnabled()) {
log.debug("Processing file :" + VFSUtils.maskURLPassword(child.toString()));
}
processCount++;
if (child.getType() == FileType.FILE) {
processFile(entry, child);
successCount++;
// tell moveOrDeleteAfterProcessing() that the file was processed successfully
entry.setLastPollState(PollTableEntry.SUCCSESSFUL);
metrics.incrementMessagesReceived();
} else {
runPostProcess = false;
}
} catch (Exception e) {
if (e.getCause() instanceof FileNotFoundException) {
log.warn("Error processing File URI : " + VFSUtils.maskURLPassword(child.getName().toString()) + ". This can be due to file moved from another process.");
runPostProcess = false;
} else {
logException("Error processing File URI : " + VFSUtils.maskURLPassword(child.getName().getURI()), e);
failCount++;
// tell moveOrDeleteAfterProcessing() that the file failed to process
entry.setLastPollState(PollTableEntry.FAILED);
metrics.incrementFaultsReceiving();
}
}
// skip unlocking the file if the post-processing delete/move failed
boolean skipUnlock = false;
if (runPostProcess) {
try {
moveOrDeleteAfterProcessing(entry, child, fso);
} catch (AxisFault axisFault) {
logException("File object '" + VFSUtils.maskURLPassword(child.getURL().toString()) + "'cloud not be moved, will remain in \"locked\" state", axisFault);
skipUnlock = true;
failCount++;
entry.setLastPollState(PollTableEntry.FAILED);
String timeStamp = VFSUtils.getSystemTime(entry.getFailedRecordTimestampFormat());
addFailedRecord(entry, child, timeStamp);
}
}
// whether or not there was a failure, try to release the lock
if (entry.isFileLockingEnabled() && !skipUnlock) {
VFSUtils.releaseLock(fsManager, child, fso);
}
}
} else if (entry.getFileNamePattern() != null && !child.getName().getBaseName().matches(entry.getFileNamePattern())) {
// child's file name does not match the file name pattern
if (log.isDebugEnabled()) {
log.debug("Non-Matching file : " + child.getName().getBaseName());
}
} else if (isFailedRecord) {
// it is a failed record
if (entry.isFileLockingEnabled()) {
VFSUtils.releaseLock(fsManager, child, fso);
VFSUtils.releaseLock(fsManager, fileObject, fso);
}
if (fsManager.resolveFile(child.getURL().toString(), fso) != null && removeTaskState == STATE_STOPPED && entry.getMoveAfterMoveFailure() != null) {
workerPool.execute(new FileRemoveTask(entry, child, fso));
}
if (log.isDebugEnabled()) {
log.debug("File '" + VFSUtils.maskURLPassword(fileObject.getURL().toString()) + "' has been marked as a failed record, it will not " + "process");
}
}
if (iFileProcessingInterval != null && iFileProcessingInterval > 0) {
try {
if (log.isDebugEnabled()) {
log.debug("Put the VFS processor to sleep for : " + iFileProcessingInterval);
}
Thread.sleep(iFileProcessingInterval);
} catch (InterruptedException ie) {
log.error("Unable to set the interval between file processors." + ie);
}
} else if (iFileProcessingCount != null && iFileProcessingCount <= processCount) {
break;
}
}
if (failCount == 0 && successCount > 0) {
entry.setLastPollState(PollTableEntry.SUCCSESSFUL);
} else if (successCount == 0 && failCount > 0) {
entry.setLastPollState(PollTableEntry.FAILED);
} else {
entry.setLastPollState(PollTableEntry.WITH_ERRORS);
}
}
// processing of this poll table entry is complete
long now = System.currentTimeMillis();
entry.setLastPollTime(now);
entry.setNextPollTime(now + entry.getPollInterval());
} else if (log.isDebugEnabled()) {
log.debug("Unable to access or read file or directory : " + VFSUtils.maskURLPassword(fileURI) + "." + " Reason: " + (fileObject.exists() ? (fileObject.isReadable() ? "Unknown reason" : "The file can not be read!") : "The file does not exists!"));
}
onPollCompletion(entry);
} catch (FileSystemException e) {
processFailure("Error checking for existence and readability : " + VFSUtils.maskURLPassword(fileURI), e, entry);
} catch (Exception ex) {
processFailure("Un-handled exception thrown when processing the file : ", ex, entry);
} finally {
closeFileSystem(fileObject);
}
}
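The sort step above dispatches to comparator classes such as FileNameAscComparator and FileSizeDesComparator whose definitions are not part of this snippet. As a rough, hypothetical sketch (the transport's real comparators may differ), a name-based ascending comparator over Commons VFS FileObject instances could look like this:

import java.util.Comparator;
import org.apache.commons.vfs2.FileObject;

// Hypothetical sketch only; the transport ships its own comparator implementations.
class FileNameAscComparator implements Comparator<FileObject> {
    @Override
    public int compare(FileObject a, FileObject b) {
        // order children lexicographically by their base file name
        return a.getName().getBaseName().compareTo(b.getName().getBaseName());
    }
}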
use of org.wso2.charon3.core.utils.codeutils.Node in project kubernetes by ballerinax.
the class KubernetesAnnotationProcessor method processPersistentVolumeClaim.
/**
* Process PersistentVolumeClaim annotations.
*
* @param attachmentNode Attachment Node
* @return Set of @{@link PersistentVolumeClaimModel} objects
*/
Set<PersistentVolumeClaimModel> processPersistentVolumeClaim(AnnotationAttachmentNode attachmentNode) throws KubernetesPluginException {
Set<PersistentVolumeClaimModel> volumeClaimModels = new HashSet<>();
List<BLangRecordLiteral.BLangRecordKeyValue> keyValues = ((BLangRecordLiteral) ((BLangAnnotationAttachment) attachmentNode).expr).getKeyValuePairs();
for (BLangRecordLiteral.BLangRecordKeyValue keyValue : keyValues) {
List<BLangExpression> secretAnnotation = ((BLangArrayLiteral) keyValue.valueExpr).exprs;
for (BLangExpression bLangExpression : secretAnnotation) {
PersistentVolumeClaimModel claimModel = new PersistentVolumeClaimModel();
List<BLangRecordLiteral.BLangRecordKeyValue> annotationValues = ((BLangRecordLiteral) bLangExpression).getKeyValuePairs();
for (BLangRecordLiteral.BLangRecordKeyValue annotation : annotationValues) {
VolumeClaimConfig volumeMountConfig = VolumeClaimConfig.valueOf(annotation.getKey().toString());
String annotationValue = resolveValue(annotation.getValue().toString());
switch(volumeMountConfig) {
case name:
claimModel.setName(getValidName(annotationValue));
break;
case mountPath:
claimModel.setMountPath(annotationValue);
break;
case accessMode:
claimModel.setAccessMode(annotationValue);
break;
case volumeClaimSize:
claimModel.setVolumeClaimSize(annotationValue);
break;
case readOnly:
claimModel.setReadOnly(Boolean.parseBoolean(annotationValue));
break;
default:
break;
}
}
volumeClaimModels.add(claimModel);
}
}
return volumeClaimModels;
}
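The switch above dispatches on a VolumeClaimConfig enum that is not shown in this snippet. Judging only from the case labels, a minimal sketch of that enum could be the following; the actual definition in the plugin may differ:

// Sketch inferred from the case labels above; not the plugin's actual source.
enum VolumeClaimConfig {
    name,
    mountPath,
    accessMode,
    volumeClaimSize,
    readOnly
}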
use of org.wso2.charon3.core.utils.codeutils.Node in project kubernetes by ballerinax.
the class KubernetesAnnotationProcessor method processConfigMap.
/**
* Process ConfigMap annotations.
*
* @param attachmentNode Attachment Node
* @return Set of @{@link ConfigMapModel} objects
*/
Set<ConfigMapModel> processConfigMap(AnnotationAttachmentNode attachmentNode) throws KubernetesPluginException {
Set<ConfigMapModel> configMapModels = new HashSet<>();
List<BLangRecordLiteral.BLangRecordKeyValue> keyValues = ((BLangRecordLiteral) ((BLangAnnotationAttachment) attachmentNode).expr).getKeyValuePairs();
for (BLangRecordLiteral.BLangRecordKeyValue keyValue : keyValues) {
List<BLangExpression> configAnnotation = ((BLangArrayLiteral) keyValue.valueExpr).exprs;
for (BLangExpression bLangExpression : configAnnotation) {
ConfigMapModel configMapModel = new ConfigMapModel();
List<BLangRecordLiteral.BLangRecordKeyValue> annotationValues = ((BLangRecordLiteral) bLangExpression).getKeyValuePairs();
for (BLangRecordLiteral.BLangRecordKeyValue annotation : annotationValues) {
VolumeMountConfig volumeMountConfig = VolumeMountConfig.valueOf(annotation.getKey().toString());
String annotationValue = resolveValue(annotation.getValue().toString());
switch(volumeMountConfig) {
case name:
configMapModel.setName(getValidName(annotationValue));
break;
case mountPath:
configMapModel.setMountPath(annotationValue);
break;
case isBallerinaConf:
configMapModel.setBallerinaConf(Boolean.parseBoolean(annotationValue));
break;
case data:
List<BLangExpression> data = ((BLangArrayLiteral) annotation.valueExpr).exprs;
configMapModel.setData(getDataForConfigMap(data));
break;
case readOnly:
configMapModel.setReadOnly(Boolean.parseBoolean(annotationValue));
break;
default:
break;
}
}
configMapModels.add(configMapModel);
}
}
return configMapModels;
}
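Both processConfigMap and processPersistentVolumeClaim resolve the annotation key with Enum.valueOf, which throws an unchecked IllegalArgumentException when the key does not match a constant. A purely illustrative helper (hypothetical, not part of the plugin) that skips unknown keys instead could look like this:

// Hypothetical helper, not part of the plugin: returns null for unknown keys
// so the caller can decide to skip them instead of failing.
private static VolumeMountConfig safeValueOf(String key) {
    try {
        return VolumeMountConfig.valueOf(key);
    } catch (IllegalArgumentException e) {
        return null;
    }
}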
use of org.wso2.charon3.core.utils.codeutils.Node in project kubernetes by ballerinax.
the class KubernetesAnnotationProcessor method processPodAutoscalerAnnotation.
/**
* Process PodAutoscaler annotations and create a pod autoscaler model object.
*
* @param attachmentNode annotation attachment node.
* @return PodAutoscalerModel object
*/
PodAutoscalerModel processPodAutoscalerAnnotation(AnnotationAttachmentNode attachmentNode) throws KubernetesPluginException {
PodAutoscalerModel podAutoscalerModel = new PodAutoscalerModel();
List<BLangRecordLiteral.BLangRecordKeyValue> keyValues = ((BLangRecordLiteral) ((BLangAnnotationAttachment) attachmentNode).expr).getKeyValuePairs();
for (BLangRecordLiteral.BLangRecordKeyValue keyValue : keyValues) {
PodAutoscalerConfiguration podAutoscalerConfiguration = PodAutoscalerConfiguration.valueOf(keyValue.getKey().toString());
String annotationValue = resolveValue(keyValue.getValue().toString());
switch(podAutoscalerConfiguration) {
case name:
podAutoscalerModel.setName(getValidName(annotationValue));
break;
case labels:
podAutoscalerModel.setLabels(getMap(((BLangRecordLiteral) keyValue.valueExpr).keyValuePairs));
break;
case cpuPercentage:
podAutoscalerModel.setCpuPercentage(Integer.parseInt(annotationValue));
break;
case minReplicas:
podAutoscalerModel.setMinReplicas(Integer.parseInt(annotationValue));
break;
case maxReplicas:
podAutoscalerModel.setMaxReplicas(Integer.parseInt(annotationValue));
break;
default:
break;
}
}
return podAutoscalerModel;
}
use of org.wso2.charon3.core.utils.codeutils.Node in project ballerina by ballerina-lang.
the class TestAnnotationProcessor method process.
@Override
public void process(FunctionNode functionNode, List<AnnotationAttachmentNode> annotations) {
// Annotations attached to functions in other packages also reach this processor; to avoid processing those, we have the below check.
if (!suite.getSuiteName().equals(functionNode.getPosition().getSource().getPackageName())) {
return;
}
// traverse through the annotations of this function
for (AnnotationAttachmentNode attachmentNode : annotations) {
String annotationName = attachmentNode.getAnnotationName().getValue();
String functionName = functionNode.getName().getValue();
if (BEFORE_SUITE_ANNOTATION_NAME.equals(annotationName)) {
suite.addBeforeSuiteFunction(functionName);
} else if (AFTER_SUITE_ANNOTATION_NAME.equals(annotationName)) {
suite.addAfterSuiteFunction(functionName);
} else if (BEFORE_EACH_ANNOTATION_NAME.equals(annotationName)) {
suite.addBeforeEachFunction(functionName);
} else if (AFTER_EACH_ANNOTATION_NAME.equals(annotationName)) {
suite.addAfterEachFunction(functionName);
} else if (MOCK_ANNOTATION_NAME.equals(annotationName)) {
String[] vals = new String[2];
// If the package property is not present, the package defaults to "."
// TODO: when default values are supported in annotation struct we can remove this
vals[0] = ".";
if (attachmentNode.getExpression() instanceof BLangRecordLiteral) {
List<BLangRecordLiteral.BLangRecordKeyValue> attributes = ((BLangRecordLiteral) attachmentNode.getExpression()).getKeyValuePairs();
attributes.forEach(attributeNode -> {
String name = attributeNode.getKey().toString();
String value = attributeNode.getValue().toString();
if (PACKAGE.equals(name)) {
vals[0] = value;
} else if (FUNCTION.equals(name)) {
vals[1] = value;
}
});
suite.addMockFunction(vals[0] + MOCK_ANNOTATION_DELIMITER + vals[1], functionName);
}
} else if (TEST_ANNOTATION_NAME.equals(annotationName)) {
Test test = new Test();
test.setTestName(functionName);
AtomicBoolean shouldSkip = new AtomicBoolean();
AtomicBoolean groupsFound = new AtomicBoolean();
List<String> groups = registry.getGroups();
boolean shouldIncludeGroups = registry.shouldIncludeGroups();
if (attachmentNode.getExpression() instanceof BLangRecordLiteral) {
List<BLangRecordLiteral.BLangRecordKeyValue> attributes = ((BLangRecordLiteral) attachmentNode.getExpression()).getKeyValuePairs();
attributes.forEach(attributeNode -> {
String name = attributeNode.getKey().toString();
// Check if enable property is present in the annotation
if (TEST_ENABLE_ANNOTATION_NAME.equals(name) && "false".equals(attributeNode.getValue().toString())) {
// If enable is false, disable the test, no further processing is needed
shouldSkip.set(true);
return;
}
// Check whether user has provided a group list
if (groups != null && !groups.isEmpty()) {
// check if groups attribute is present in the annotation
if (GROUP_ANNOTATION_NAME.equals(name)) {
if (attributeNode.getValue() instanceof BLangArrayLiteral) {
BLangArrayLiteral values = (BLangArrayLiteral) attributeNode.getValue();
boolean isGroupPresent = isGroupAvailable(groups, values.exprs.stream().map(node -> node.toString()).collect(Collectors.toList()));
if (shouldIncludeGroups) {
// include only if the test belongs to one of these groups
if (!isGroupPresent) {
// skip the test if this group is not defined in this test
shouldSkip.set(true);
return;
}
} else {
// exclude only if the test belongs to one of these groups
if (isGroupPresent) {
// skip if this test belongs to one of the excluded groups
shouldSkip.set(true);
return;
}
}
groupsFound.set(true);
}
}
}
if (VALUE_SET_ANNOTATION_NAME.equals(name)) {
test.setDataProvider(attributeNode.getValue().toString());
}
if (BEFORE_FUNCTION.equals(name)) {
test.setBeforeTestFunction(attributeNode.getValue().toString());
}
if (AFTER_FUNCTION.equals(name)) {
test.setAfterTestFunction(attributeNode.getValue().toString());
}
if (DEPENDS_ON_FUNCTIONS.equals(name)) {
if (attributeNode.getValue() instanceof BLangArrayLiteral) {
BLangArrayLiteral values = (BLangArrayLiteral) attributeNode.getValue();
values.exprs.stream().map(node -> node.toString()).forEach(test::addDependsOnTestFunction);
}
}
});
}
if (groups != null && !groups.isEmpty() && !groupsFound.get() && shouldIncludeGroups) {
// if the user has asked to run only a specific list of groups and this test doesn't have
// that group, we should skip the test
shouldSkip.set(true);
}
if (!shouldSkip.get()) {
suite.addTests(test);
}
} else {
// disregard this annotation
}
}
}
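The group filtering above relies on an isGroupAvailable helper that is not included in this snippet. A minimal sketch, assuming it only checks whether any group declared on the test appears in the configured group list (the real implementation in the test runner may differ):

// Hypothetical sketch of the helper referenced above.
private boolean isGroupAvailable(List<String> configuredGroups, List<String> testGroups) {
    for (String group : testGroups) {
        if (configuredGroups.contains(group)) {
            return true;
        }
    }
    return false;
}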