Use of org.wso2.carbon.apimgt.api.model.policy.Limit in project carbon-apimgt by wso2.
From the class CommonThrottleMappingUtilTestCase, method fromRequestCountThrottleLimitDtoToQuotaPolicyTest.
@Test(description = "Convert Request Count Throttle Limit DTO to Quota Policy")
public void fromRequestCountThrottleLimitDtoToQuotaPolicyTest() throws Exception {
ThrottleLimitDTO throttleLimitDTO = new ThrottleLimitDTO();
throttleLimitDTO.setType(PolicyConstants.REQUEST_COUNT_LIMIT_TYPE);
RequestCountLimitDTO requestCountLimitDTO = new RequestCountLimitDTO();
requestCountLimitDTO.setRequestCount(100);
throttleLimitDTO.setRequestCountLimit(requestCountLimitDTO);
throttleLimitDTO.setTimeUnit("sec");
throttleLimitDTO.setUnitTime(1);
QuotaPolicy policy = CommonThrottleMappingUtil.fromDTOToQuotaPolicy(throttleLimitDTO);
Assert.assertNotNull(policy);
RequestCountLimit limit = (RequestCountLimit) policy.getLimit();
Assert.assertNotNull(limit);
assertEquals(limit.getRequestCount(), 100);
assertEquals(limit.getTimeUnit(), "sec");
assertEquals(limit.getUnitTime(), 1);
}
Use of org.wso2.carbon.apimgt.api.model.policy.Limit in project carbon-apimgt by wso2.
From the class CommonThrottleMappingUtilTestCase, method fromBandwidthThrottleLimitDtoToQuotaPolicyTest.
@Test(description = "Convert Bandwidth Throttle Limit DTO to Quota Policy")
public void fromBandwidthThrottleLimitDtoToQuotaPolicyTest() throws Exception {
ThrottleLimitDTO throttleLimitDTO = new ThrottleLimitDTO();
throttleLimitDTO.setType(PolicyConstants.BANDWIDTH_LIMIT_TYPE);
BandwidthLimitDTO bandwidthLimitDTO = new BandwidthLimitDTO();
bandwidthLimitDTO.setDataAmount(10);
bandwidthLimitDTO.setDataUnit(KB);
throttleLimitDTO.setBandwidthLimit(bandwidthLimitDTO);
throttleLimitDTO.setTimeUnit("min");
throttleLimitDTO.setUnitTime(1);
QuotaPolicy policy = CommonThrottleMappingUtil.fromDTOToQuotaPolicy(throttleLimitDTO);
Assert.assertNotNull(policy);
assertEquals(policy.getType(), PolicyConstants.BANDWIDTH_TYPE);
BandwidthLimit bandwidthLimit = (BandwidthLimit) policy.getLimit();
assertEquals(bandwidthLimit.getDataAmount(), 10);
assertEquals(bandwidthLimit.getDataUnit(), KB);
assertEquals(bandwidthLimit.getTimeUnit(), "min");
assertEquals(bandwidthLimit.getUnitTime(), 1);
}
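Both tests above compile only with members that the snippets do not show: assertEquals is used without the Assert. prefix (so a static import of TestNG's assertEquals is assumed) and the bandwidth test references a KB constant. A minimal sketch of the assumed declarations follows; the names are illustrative rather than taken from the actual test class.

import static org.testng.Assert.assertEquals;

import org.testng.Assert;
import org.testng.annotations.Test;

public class CommonThrottleMappingUtilTestCase {

    // Assumed data-unit constant referenced by the bandwidth test;
    // the real test class may define it differently.
    private static final String KB = "KB";

    // ... the two test methods shown above ...
}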
Use of org.wso2.carbon.apimgt.api.model.policy.Limit in project carbon-apimgt by wso2.
From the class SubscriptionsApiServiceImpl, method subscriptionsGet.
/**
* Retrieve subscriptions
*
* @param apiContext Context of the API
* @param apiVersion API version
* @param limit Limit value
* @return Subscriptions of the API
* @throws NotFoundException If the subscriptions could not be retrieved
*/
@Override
public Response subscriptionsGet(String apiContext, String apiVersion, Integer limit, String accept, Request request) throws NotFoundException {
try {
APIMgtAdminService apiMgtAdminService = APIManagerFactory.getInstance().getAPIMgtAdminService();
List<SubscriptionValidationData> subscriptionsOfApi;
if (StringUtils.isEmpty(apiContext) || StringUtils.isEmpty(apiVersion)) {
APIUtils.logDebug("API Context or version is null or empty. Retrieving subscriptions of all APIs", log);
subscriptionsOfApi = apiMgtAdminService.getAPISubscriptions(limit);
} else {
subscriptionsOfApi = apiMgtAdminService.getAPISubscriptionsOfApi(apiContext, apiVersion);
}
SubscriptionListDTO subscriptionsList = new SubscriptionListDTO();
subscriptionsList.setList(MappingUtil.convertToSubscriptionListDto(subscriptionsOfApi));
return Response.ok(subscriptionsList).build();
} catch (APIManagementException e) {
String errorMessage = "Error while retrieving subscriptions.";
HashMap<String, String> paramList = new HashMap<String, String>();
if (!StringUtils.isEmpty(apiContext)) {
paramList.put(APIMgtConstants.ExceptionsConstants.API_CONTEXT, apiContext);
}
if (!StringUtils.isEmpty(apiVersion)) {
paramList.put(APIMgtConstants.ExceptionsConstants.API_VERSION, apiVersion);
}
ErrorDTO errorDTO = RestApiUtil.getErrorDTO(e.getErrorHandler(), paramList);
log.error(errorMessage, e);
return Response.status(e.getErrorHandler().getHttpStatusCode()).entity(errorDTO).build();
}
}
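The method above follows a plain JAX-RS response pattern: a 200 OK wrapping the list DTO on success, and a status code taken from the exception's error handler with an ErrorDTO entity on failure. A stripped-down sketch of that pattern, using only javax.ws.rs.core.Response and placeholder payload types rather than the actual carbon-apimgt DTOs:

import javax.ws.rs.core.Response;

// Illustrative only: 'payload' and 'errorBody' stand in for SubscriptionListDTO and ErrorDTO.
public class ResponsePatternSketch {

    Response success(Object payload) {
        // 200 OK with the result entity
        return Response.ok(payload).build();
    }

    Response failure(int httpStatusCode, Object errorBody) {
        // status code comes from the error handler, body carries the error details
        return Response.status(httpStatusCode).entity(errorBody).build();
    }
}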
Use of org.wso2.carbon.apimgt.api.model.policy.Limit in project wso2-synapse by wso2.
From the class HttpMessageHandler, method getMessageDataStream.
@Override
public InputStream getMessageDataStream(MessageContext msgContext) {
HttpCarbonMessage carbonMessage = (HttpCarbonMessage) msgContext.getProperty(BridgeConstants.HTTP_CARBON_MESSAGE);
if (Objects.isNull(carbonMessage)) {
return null;
}
BufferedInputStream bufferedInputStream;
if (msgContext.getProperty(PassThroughConstants.BUFFERED_INPUT_STREAM) != null) {
bufferedInputStream = (BufferedInputStream) msgContext.getProperty(PassThroughConstants.BUFFERED_INPUT_STREAM);
try {
bufferedInputStream.reset();
bufferedInputStream.mark(0);
} catch (Exception e) {
// just ignore the error
}
} else {
HttpMessageDataStreamer httpMessageDataStreamer = new HttpMessageDataStreamer(carbonMessage);
bufferedInputStream = new BufferedInputStream(httpMessageDataStreamer.getInputStream());
// Use a read-limit large enough to allow resetting the stream even after the full payload has been read.
bufferedInputStream.mark(Integer.MAX_VALUE);
msgContext.setProperty(PassThroughConstants.BUFFERED_INPUT_STREAM, bufferedInputStream);
}
return bufferedInputStream;
}
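The caching above relies on standard java.io mark/reset semantics: marking the buffered stream with a read-limit at least as large as the payload allows the entire message to be consumed and then replayed. A self-contained sketch of that behavior, independent of the transport classes above:

import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;

public class MarkResetSketch {
    public static void main(String[] args) throws IOException {
        byte[] payload = "hello".getBytes();
        BufferedInputStream in = new BufferedInputStream(new ByteArrayInputStream(payload));

        // Mark with a read-limit large enough to cover everything we intend to read,
        // so reset() remains valid after the whole stream has been consumed.
        in.mark(Integer.MAX_VALUE);
        while (in.read() != -1) {
            // consume the payload once
        }

        // Rewind to the mark and read the same bytes again.
        in.reset();
        System.out.println(in.read()); // prints 104, the first byte ('h') replayed
    }
}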
Use of org.wso2.carbon.apimgt.api.model.policy.Limit in project wso2-synapse by wso2.
From the class VFSTransportListener, method scanFileOrDirectory.
/**
* Search for files that match the given regex pattern and create a list.
* Then process each of these files and update the status of the scan on
* the poll table.
* @param entry the poll table entry for the scan
* @param fileURI the file or directory to be scanned
*/
private void scanFileOrDirectory(final PollTableEntry entry, String fileURI) {
if (log.isDebugEnabled()) {
log.debug("Polling: " + VFSUtils.maskURLPassword(fileURI));
}
if (entry.isClusterAware()) {
boolean leader = true;
ClusteringAgent agent = getConfigurationContext().getAxisConfiguration().getClusteringAgent();
if (agent != null && agent.getParameter("domain") != null) {
// hazelcast clustering instance name
String hazelcastInstanceName = agent.getParameter("domain").getValue() + ".instance";
HazelcastInstance instance = Hazelcast.getHazelcastInstanceByName(hazelcastInstanceName);
if (instance != null) {
// dirty leader election
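// Assumption based on the check below: the member at the head of the cluster view is treated
// as the leader, and only the node for which that member is local (localMember() == true)
// carries on with the poll; all other nodes skip it.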
leader = instance.getCluster().getMembers().iterator().next().localMember();
} else {
log.warn("Clustering error, running the polling task in this node");
}
} else {
log.warn("Although proxy is cluster aware, clustering config are not present, hence running the" + " the polling task in this node");
}
if (!leader) {
if (log.isDebugEnabled()) {
log.debug("This Member is not the leader");
}
entry.setLastPollState(PollTableEntry.NONE);
long now = System.currentTimeMillis();
entry.setLastPollTime(now);
entry.setNextPollTime(now + entry.getPollInterval());
onPollCompletion(entry);
return;
}
if (log.isDebugEnabled()) {
log.debug("This Member is the leader");
}
}
FileSystemOptions fso = null;
setFileSystemClosed(false);
try {
fso = VFSUtils.attachFileSystemOptions(entry.getVfsSchemeProperties(), fsManager);
} catch (Exception e) {
log.error("Error while attaching VFS file system properties. " + e.getMessage());
}
FileObject fileObject = null;
// TODO : Trying to make the correct URL out of the malformed one.
if (fileURI.contains("vfs:")) {
fileURI = fileURI.substring(fileURI.indexOf("vfs:") + 4);
}
if (log.isDebugEnabled()) {
log.debug("Scanning directory or file : " + VFSUtils.maskURLPassword(fileURI));
}
boolean wasError = true;
int retryCount = 0;
int maxRetryCount = entry.getMaxRetryCount();
long reconnectionTimeout = entry.getReconnectTimeout();
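// Retry resolving the file URI, sleeping reconnectionTimeout milliseconds between attempts;
// after maxRetryCount consecutive FileSystemExceptions the failure is reported and the scan aborts.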
while (wasError) {
try {
retryCount++;
fileObject = fsManager.resolveFile(fileURI, fso);
if (fileObject == null) {
log.error("fileObject is null");
throw new FileSystemException("fileObject is null");
}
wasError = false;
} catch (FileSystemException e) {
if (retryCount >= maxRetryCount) {
processFailure("Repeatedly failed to resolve the file URI: " + VFSUtils.maskURLPassword(fileURI), e, entry);
closeFileSystem(fileObject);
return;
} else {
log.warn("Failed to resolve the file URI: " + VFSUtils.maskURLPassword(fileURI) + ", in attempt " + retryCount + ", " + e.getMessage() + " Retrying in " + reconnectionTimeout + " milliseconds.");
}
} catch (Exception e) {
log.warn("Runtime error may have occurred. ", e);
closeFileSystem(fileObject);
}
if (wasError) {
try {
Thread.sleep(reconnectionTimeout);
} catch (InterruptedException e2) {
Thread.currentThread().interrupt();
log.error("Thread was interrupted while waiting to reconnect.", e2);
}
}
}
try {
if (fileObject.exists() && fileObject.isReadable()) {
entry.setLastPollState(PollTableEntry.NONE);
FileObject[] children = null;
try {
children = fileObject.getChildren();
} catch (FileNotFolderException ignored) {
} catch (FileSystemException ex) {
log.error(ex.getMessage(), ex);
}
// if this is a file that would translate to a single message
if (children == null || children.length == 0) {
boolean isFailedRecord = false;
if (entry.getMoveAfterMoveFailure() != null) {
isFailedRecord = isFailedRecord(fileObject, entry);
}
if (fileObject.getType() == FileType.FILE && !isFailedRecord) {
boolean runPostProcess = true;
if (!entry.isFileLockingEnabled() || (entry.isFileLockingEnabled() && acquireLock(fsManager, fileObject, entry, fso, true))) {
try {
if (fileObject.getType() == FileType.FILE) {
processFile(entry, fileObject);
entry.setLastPollState(PollTableEntry.SUCCSESSFUL);
metrics.incrementMessagesReceived();
} else {
runPostProcess = false;
}
} catch (AxisFault e) {
if (e.getCause() instanceof FileNotFoundException) {
log.warn("Error processing File URI : " + VFSUtils.maskURLPassword(fileObject.getName().toString()) + ". This can be due to file moved from another process.");
runPostProcess = false;
} else {
logException("Error processing File URI : " + VFSUtils.maskURLPassword(fileObject.getName().getURI()), e);
entry.setLastPollState(PollTableEntry.FAILED);
metrics.incrementFaultsReceiving();
}
}
if (runPostProcess) {
try {
moveOrDeleteAfterProcessing(entry, fileObject, fso);
} catch (AxisFault axisFault) {
logException("File object '" + VFSUtils.maskURLPassword(fileObject.getURL().toString()) + "' " + "cloud not be moved", axisFault);
entry.setLastPollState(PollTableEntry.FAILED);
String timeStamp = VFSUtils.getSystemTime(entry.getFailedRecordTimestampFormat());
addFailedRecord(entry, fileObject, timeStamp);
}
}
if (entry.isFileLockingEnabled()) {
VFSUtils.releaseLock(fsManager, fileObject, fso);
if (log.isDebugEnabled()) {
log.debug("Removed the lock file '" + VFSUtils.maskURLPassword(fileObject.toString()) + ".lock' of the file '" + VFSUtils.maskURLPassword(fileObject.toString()));
}
}
} else if (log.isDebugEnabled()) {
log.debug("Couldn't get the lock for processing the file : " + VFSUtils.maskURLPassword(fileObject.getName().getURI()));
} else if (isFailedRecord) {
if (entry.isFileLockingEnabled()) {
VFSUtils.releaseLock(fsManager, fileObject, fso);
}
// schedule a cleanup task if the file is there
if (fsManager.resolveFile(fileObject.getURL().toString(), fso) != null && removeTaskState == STATE_STOPPED && entry.getMoveAfterMoveFailure() != null) {
workerPool.execute(new FileRemoveTask(entry, fileObject, fso));
}
if (log.isDebugEnabled()) {
log.debug("File '" + VFSUtils.maskURLPassword(fileObject.getURL().toString()) + "' has been marked as a failed" + " record, it will not process");
}
}
}
} else {
int failCount = 0;
int successCount = 0;
int processCount = 0;
Integer iFileProcessingInterval = entry.getFileProcessingInterval();
Integer iFileProcessingCount = entry.getFileProcessingCount();
if (log.isDebugEnabled()) {
log.debug("File name pattern : " + entry.getFileNamePattern());
}
// Sort the files
String strSortParam = entry.getFileSortParam();
if (strSortParam != null) {
log.debug("Start Sorting the files.");
boolean bSortOrderAsscending = entry.isFileSortAscending();
if (log.isDebugEnabled()) {
log.debug("Sorting the files by : " + strSortParam + ". (" + bSortOrderAsscending + ")");
}
if (strSortParam.equals(VFSConstants.FILE_SORT_VALUE_NAME) && bSortOrderAsscending) {
Arrays.sort(children, new FileNameAscComparator());
} else if (strSortParam.equals(VFSConstants.FILE_SORT_VALUE_NAME) && !bSortOrderAsscending) {
Arrays.sort(children, new FileNameDesComparator());
} else if (strSortParam.equals(VFSConstants.FILE_SORT_VALUE_SIZE) && bSortOrderAsscending) {
Arrays.sort(children, new FileSizeAscComparator());
} else if (strSortParam.equals(VFSConstants.FILE_SORT_VALUE_SIZE) && !bSortOrderAsscending) {
Arrays.sort(children, new FileSizeDesComparator());
} else if (strSortParam.equals(VFSConstants.FILE_SORT_VALUE_LASTMODIFIEDTIMESTAMP) && bSortOrderAsscending) {
Arrays.sort(children, new FileLastmodifiedtimestampAscComparator());
} else if (strSortParam.equals(VFSConstants.FILE_SORT_VALUE_LASTMODIFIEDTIMESTAMP) && !bSortOrderAsscending) {
Arrays.sort(children, new FileLastmodifiedtimestampDesComparator());
}
log.debug("End Sorting the files.");
}
for (FileObject child : children) {
// Stop processing when the service gets undeployed
if (state != BaseConstants.STARTED || !entry.getService().isActive()) {
return;
}
/**
* Before starting to process another file, see whether the proxy is stopped or not.
*/
if (entry.isCanceled()) {
break;
}
// skipping *.lock file
if (child.getName().getBaseName().endsWith(".lock")) {
continue;
}
// skipping subfolders
if (child.getType() != FileType.FILE) {
continue;
}
// skipping files depending on size limitation
if (entry.getFileSizeLimit() >= 0 && child.getContent().getSize() > entry.getFileSizeLimit()) {
if (log.isDebugEnabled()) {
log.debug("Ignoring file - " + child.getName().getBaseName() + " size - " + child.getContent().getSize() + " since it exceeds file size limit - " + entry.getFileSizeLimit());
}
continue;
}
boolean isFailedRecord = false;
if (entry.getMoveAfterMoveFailure() != null) {
isFailedRecord = isFailedRecord(child, entry);
}
if (entry.getFileNamePattern() != null && child.getName().getBaseName().matches(entry.getFileNamePattern())) {
// now we try to get the lock and process
if (log.isDebugEnabled()) {
log.debug("Matching file : " + child.getName().getBaseName());
}
boolean runPostProcess = true;
if ((!entry.isFileLockingEnabled() || (entry.isFileLockingEnabled() && acquireLock(fsManager, child, entry, fso, true))) && !isFailedRecord) {
// process the file
try {
if (log.isDebugEnabled()) {
log.debug("Processing file :" + VFSUtils.maskURLPassword(child.toString()));
}
processCount++;
if (child.getType() == FileType.FILE) {
processFile(entry, child);
successCount++;
// tell moveOrDeleteAfterProcessing() that the file was processed successfully
entry.setLastPollState(PollTableEntry.SUCCSESSFUL);
metrics.incrementMessagesReceived();
} else {
runPostProcess = false;
}
} catch (Exception e) {
if (e.getCause() instanceof FileNotFoundException) {
log.warn("Error processing File URI : " + VFSUtils.maskURLPassword(child.getName().toString()) + ". This can be due to file moved from another process.");
runPostProcess = false;
} else {
logException("Error processing File URI : " + VFSUtils.maskURLPassword(child.getName().getURI()), e);
failCount++;
// tell moveOrDeleteAfterProcessing() file failed
entry.setLastPollState(PollTableEntry.FAILED);
metrics.incrementFaultsReceiving();
}
}
// skip unlocking the file if the post-processing delete/move failed
boolean skipUnlock = false;
if (runPostProcess) {
try {
moveOrDeleteAfterProcessing(entry, child, fso);
} catch (AxisFault axisFault) {
logException("File object '" + VFSUtils.maskURLPassword(child.getURL().toString()) + "'cloud not be moved, will remain in \"locked\" state", axisFault);
skipUnlock = true;
failCount++;
entry.setLastPollState(PollTableEntry.FAILED);
String timeStamp = VFSUtils.getSystemTime(entry.getFailedRecordTimestampFormat());
addFailedRecord(entry, child, timeStamp);
}
}
// whether or not there was a failure, try to release the lock
if (entry.isFileLockingEnabled() && !skipUnlock) {
VFSUtils.releaseLock(fsManager, child, fso);
}
}
} else if (entry.getFileNamePattern() != null && !child.getName().getBaseName().matches(entry.getFileNamePattern())) {
// child's file name does not match the file name pattern
if (log.isDebugEnabled()) {
log.debug("Non-Matching file : " + child.getName().getBaseName());
}
} else if (isFailedRecord) {
// it is a failed record
if (entry.isFileLockingEnabled()) {
VFSUtils.releaseLock(fsManager, child, fso);
VFSUtils.releaseLock(fsManager, fileObject, fso);
}
if (fsManager.resolveFile(child.getURL().toString(), fso) != null && removeTaskState == STATE_STOPPED && entry.getMoveAfterMoveFailure() != null) {
workerPool.execute(new FileRemoveTask(entry, child, fso));
}
if (log.isDebugEnabled()) {
log.debug("File '" + VFSUtils.maskURLPassword(fileObject.getURL().toString()) + "' has been marked as a failed record, it will not " + "process");
}
}
close(child);
if (iFileProcessingInterval != null && iFileProcessingInterval > 0) {
try {
if (log.isDebugEnabled()) {
log.debug("Put the VFS processor to sleep for : " + iFileProcessingInterval);
}
Thread.sleep(iFileProcessingInterval);
} catch (InterruptedException ie) {
log.error("Unable to set the interval between file processors." + ie);
Thread.currentThread().interrupt();
}
} else if (iFileProcessingCount != null && iFileProcessingCount <= processCount) {
break;
}
}
if (failCount == 0 && successCount > 0) {
entry.setLastPollState(PollTableEntry.SUCCSESSFUL);
} else if (successCount == 0 && failCount > 0) {
entry.setLastPollState(PollTableEntry.FAILED);
} else {
entry.setLastPollState(PollTableEntry.WITH_ERRORS);
}
}
// processing of this poll table entry is complete
long now = System.currentTimeMillis();
entry.setLastPollTime(now);
entry.setNextPollTime(now + entry.getPollInterval());
} else {
// The file object is not readable. Clean the cached connection to trigger
// the retry mechanism
closeFileSystem(fileObject);
if (log.isDebugEnabled()) {
log.debug("Unable to access or read file or directory : " + VFSUtils.maskURLPassword(fileURI) + "." + " Reason: " + (fileObject.exists() ? (fileObject.isReadable() ? "Unknown reason" : "The file can not be read!") : "The file does not exists!"));
}
}
onPollCompletion(entry);
} catch (FileSystemException e) {
closeFileSystem(fileObject);
processFailure("Error checking for existence and readability : " + VFSUtils.maskURLPassword(fileURI), e, entry);
} catch (Exception ex) {
closeFileSystem(fileObject);
processFailure("Un-handled exception thrown when processing the file : ", ex, entry);
}
}
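The sorting branch above references several comparators (FileNameAscComparator, FileSizeAscComparator, FileLastmodifiedtimestampAscComparator and their descending counterparts) whose implementations are not shown. A plausible minimal sketch of the ascending name comparator over Apache Commons VFS FileObject instances follows; the real classes in the transport may differ.

import java.util.Comparator;

import org.apache.commons.vfs2.FileObject;

// Illustrative sketch only: orders files by base name, ascending.
public class FileNameAscComparator implements Comparator<FileObject> {
    @Override
    public int compare(FileObject a, FileObject b) {
        return a.getName().getBaseName().compareTo(b.getName().getBaseName());
    }
}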