Use of org.apache.geode.cache.execute.ResultCollector in the Apache Geode project.
From the class QueryUsingFunctionContextDUnitTest, method runLDSQueryOnClientUsingFunc.
/**
 * Runs a {@link LocalDataSet} query on a single server and flattens the
 * per-node result lists into one list.
 *
 * @param func the function to execute on the server
 * @param filter the filter keys, passed as the second function argument
 *        (must not be null if the function later calls withFilter())
 * @param query the OQL query string, passed as the first function argument
 * @return the flattened query results; empty when the collected results are
 *         not nested per-node lists
 */
private ArrayList runLDSQueryOnClientUsingFunc(Function func, Set filter, String query) {
// Filter can not be set as null if withFilter() is called.
ResultCollector rcollector = FunctionService.onServer(ClientCacheFactory.getAnyInstance()).setArguments(new Object[] { query, filter }).execute(func);
Object result = rcollector.getResult();
assertTrue(result instanceof ArrayList);
// Results from multiple nodes: each element may itself be a per-node list.
ArrayList resultList = (ArrayList) result;
ArrayList queryResults = new ArrayList();
if (!resultList.isEmpty() && resultList.get(0) instanceof ArrayList) {
for (Object obj : resultList) {
if (obj != null) {
queryResults.addAll((ArrayList) obj);
}
}
}
return queryResults;
}
Use of org.apache.geode.cache.execute.ResultCollector in the Apache Geode project.
From the class QueryUsingFunctionContextDUnitTest, method testQueriesWithFilterKeysOnPRWithBucketDestroy.
/**
 * Verifies that queries executed through a function context fail with a
 * {@link QueryInvocationTargetException} when buckets of the target
 * partitioned region are destroyed while the query is running.
 */
@Test
public void testQueriesWithFilterKeysOnPRWithBucketDestroy() {
IgnoredException.addIgnoredException("QueryInvocationTargetException");
// Install a query observer on server1 that destroys the first few buckets
// of the partitioned region as soon as any query starts.
server1.invoke(new CacheSerializableRunnable("Set QueryObserver in cache on server1") {
@Override
public void run2() throws CacheException {
class MyQueryObserver extends IndexTrackingQueryObserver {
@Override
public void startQuery(Query query) {
Region pr = CacheFactory.getAnyInstance().getRegion(PartitionedRegionName1);
for (int i = 0; i < 7; i++) {
Region keyRegion = ((PartitionedRegion) pr).getBucketRegion(i);
if (keyRegion != null) {
keyRegion.destroyRegion();
}
}
}
}
QueryObserverHolder.setInstance(new MyQueryObserver());
}
});
// From the client, run every query through the function; each one must fail
// with a FunctionException caused by QueryInvocationTargetException.
client.invoke(new CacheSerializableRunnable("Run function on PR") {
@Override
public void run2() throws CacheException {
Set filter = new HashSet();
filter.addAll(getFilter(0, 19));
for (int i = 0; i < queries.length; i++) {
try {
function = new TestQueryFunction("queryFunctionBucketDestroy");
FunctionService.onRegion(CacheFactory.getAnyInstance().getRegion(PartitionedRegionName1)).setArguments(queries[i]).withFilter(filter).execute(function);
// Should not come here, an exception is expected from above function call.
fail("Function call did not fail for query with function context");
} catch (FunctionException ex) {
if (!(ex.getCause() instanceof QueryInvocationTargetException)) {
fail("Should have received a QueryInvocationTargetException but received: " + ex.getMessage());
}
}
}
}
});
// Reset the query observer on server1 so later tests are unaffected.
server1.invoke(new CacheSerializableRunnable("Reset Query Observer on server1") {
@Override
public void run2() throws CacheException {
QueryObserverHolder.reset();
}
});
}
Use of org.apache.geode.cache.execute.ResultCollector in the Apache Geode project.
From the class PartitionedRegionEquiJoinIntegrationTest, method executeQueries.
@Override
protected Object[] executeQueries(String[] queries) {
// Run the equi-join test function on region1, passing the queries as the
// function argument, then unwrap the first element of the collected list.
ResultCollector collector = FunctionService.onRegion(region1).setArguments(queries).execute(equijoinTestFunction.getId());
ArrayList collected = (ArrayList) collector.getResult();
return (Object[]) collected.get(0);
}
Use of org.apache.geode.cache.execute.ResultCollector in the Apache Geode project.
From the class MBeanProxyInvocationHandler, method delegateToFucntionService.
/**
 * Delegates an MBean method invocation to the generic management function,
 * which executes the method on the remote member hosting the MBean.
 *
 * @param objectName ObjectName of the MBean
 * @param methodName name of the method to invoke
 * @param args arguments to the method
 * @param signature signature of the method
 * @return the remote invocation result, or null if the function execution
 *         itself failed
 * @throws Throwable rethrown by checkErrors when the remote result contains
 *         an error
 */
protected Object delegateToFucntionService(ObjectName objectName, String methodName, Object[] args, String[] signature) throws Throwable {
// Pack the invocation details into the fixed 5-slot argument array the
// management function expects.
Object[] functionArgs = new Object[5];
functionArgs[0] = objectName;
functionArgs[1] = methodName;
functionArgs[2] = signature;
functionArgs[3] = args;
functionArgs[4] = member.getName();
List<Object> result = null;
try {
ResultCollector rc = FunctionService.onMember(member).setArguments(functionArgs).execute(ManagementConstants.MGMT_FUNCTION_ID);
result = (List<Object>) rc.getResult();
// Exceptions raised by the management function framework itself.
} catch (Exception e) {
if (logger.isDebugEnabled()) {
logger.debug(" Exception while Executing Funtion {}", e.getMessage(), e);
}
// Only in case of Exception caused for Function framework: swallow and
// report failure to the caller as null.
return null;
} catch (VirtualMachineError e) {
// Fatal JVM error: initiate system-failure handling and propagate.
SystemFailure.initiateFailure(e);
throw e;
} catch (Throwable th) {
// Any other Throwable: first check for a pending system failure, then
// log and report null like the Exception case.
SystemFailure.checkFailure();
if (logger.isDebugEnabled()) {
logger.debug(" Exception while Executing Funtion {}", th.getMessage(), th);
}
return null;
}
// Extract the payload at RESULT_INDEX; checkErrors rethrows remote errors.
return checkErrors(result.get(ManagementConstants.RESULT_INDEX));
}
Use of org.apache.geode.cache.execute.ResultCollector in the Apache Geode project.
From the class IndexCommands, method destroyIndex.
/**
 * Implements the gfsh "destroy index" command: destroys the named index (or
 * all indexes) on the given region, members, or groups, then reports which
 * members succeeded and groups failures by their exception message.
 *
 * @param indexName name of the index to destroy; blank destroys all indexes in scope
 * @param regionPath region whose indexes are destroyed; blank means all regions
 * @param memberNameOrID members to target; empty means all members
 * @param group member groups to target; empty means all groups
 * @return an info result on any success, otherwise an error result
 */
@CliCommand(value = CliStrings.DESTROY_INDEX, help = CliStrings.DESTROY_INDEX__HELP)
@CliMetaData(shellOnly = false, relatedTopic = { CliStrings.TOPIC_GEODE_REGION, CliStrings.TOPIC_GEODE_DATA })
public Result destroyIndex(@CliOption(key = CliStrings.DESTROY_INDEX__NAME, mandatory = false, unspecifiedDefaultValue = "", help = CliStrings.DESTROY_INDEX__NAME__HELP) final String indexName, @CliOption(key = CliStrings.DESTROY_INDEX__REGION, mandatory = false, optionContext = ConverterHint.REGION_PATH, help = CliStrings.DESTROY_INDEX__REGION__HELP) final String regionPath, @CliOption(key = CliStrings.DESTROY_INDEX__MEMBER, mandatory = false, optionContext = ConverterHint.MEMBERIDNAME, help = CliStrings.DESTROY_INDEX__MEMBER__HELP) final String[] memberNameOrID, @CliOption(key = CliStrings.DESTROY_INDEX__GROUP, mandatory = false, optionContext = ConverterHint.MEMBERGROUP, help = CliStrings.DESTROY_INDEX__GROUP__HELP) final String[] group) {
// At least one option must be supplied to scope the destroy.
if (StringUtils.isBlank(indexName) && StringUtils.isBlank(regionPath) && ArrayUtils.isEmpty(group) && ArrayUtils.isEmpty(memberNameOrID)) {
return ResultBuilder.createUserErrorResult(CliStrings.format(CliStrings.PROVIDE_ATLEAST_ONE_OPTION, CliStrings.DESTROY_INDEX));
}
String regionName = null;
// Region-scoped destroys require manage permission on that region; otherwise
// cluster-wide data manage permission is required.
if (StringUtils.isNotBlank(regionPath)) {
regionName = regionPath.startsWith("/") ? regionPath.substring(1) : regionPath;
this.securityService.authorizeRegionManage(regionName);
} else {
this.securityService.authorizeDataManage();
}
IndexInfo indexInfo = new IndexInfo(indexName, regionName);
Set<DistributedMember> targetMembers = CliUtil.findMembers(group, memberNameOrID);
if (targetMembers.isEmpty()) {
return ResultBuilder.createUserErrorResult(CliStrings.NO_MEMBERS_FOUND_MESSAGE);
}
ResultCollector rc = CliUtil.executeFunction(destroyIndexFunction, indexInfo, targetMembers);
List<Object> funcResults = (List<Object>) rc.getResult();
Set<String> successfulMembers = new TreeSet<String>();
Map<String, Set<String>> indexOpFailMap = new HashMap<String, Set<String>>();
AtomicReference<XmlEntity> xmlEntity = new AtomicReference<>();
for (Object funcResult : funcResults) {
if (!(funcResult instanceof CliFunctionResult)) {
continue;
}
CliFunctionResult cliFunctionResult = (CliFunctionResult) funcResult;
if (cliFunctionResult.isSuccessful()) {
successfulMembers.add(cliFunctionResult.getMemberIdOrName());
// Capture the first XML entity so cluster configuration is updated once.
if (xmlEntity.get() == null) {
xmlEntity.set(cliFunctionResult.getXmlEntity());
}
} else {
// Group failed members by failure message for the error report.
indexOpFailMap.computeIfAbsent(cliFunctionResult.getMessage(), k -> new TreeSet<String>()).add(cliFunctionResult.getMemberIdOrName());
}
}
Result result;
if (!successfulMembers.isEmpty()) {
result = buildDestroyIndexSuccessResult(indexName, regionPath, successfulMembers);
} else {
result = buildDestroyIndexFailureResult(indexName, indexOpFailMap);
}
if (xmlEntity.get() != null) {
persistClusterConfiguration(result, () -> getSharedConfiguration().deleteXmlEntity(xmlEntity.get(), group));
}
return result;
}

/**
 * Builds the info result describing which index/region combination was
 * destroyed and enumerating the members on which the destroy succeeded.
 */
private Result buildDestroyIndexSuccessResult(String indexName, String regionPath, Set<String> successfulMembers) {
InfoResultData infoResult = ResultBuilder.createInfoResultData();
if (StringUtils.isNotBlank(indexName)) {
if (StringUtils.isNotBlank(regionPath)) {
infoResult.addLine(CliStrings.format(CliStrings.DESTROY_INDEX__ON__REGION__SUCCESS__MSG, indexName, regionPath));
} else {
infoResult.addLine(CliStrings.format(CliStrings.DESTROY_INDEX__SUCCESS__MSG, indexName));
}
} else {
if (StringUtils.isNotBlank(regionPath)) {
infoResult.addLine(CliStrings.format(CliStrings.DESTROY_INDEX__ON__REGION__ONLY__SUCCESS__MSG, regionPath));
} else {
infoResult.addLine(CliStrings.DESTROY_INDEX__ON__MEMBERS__ONLY__SUCCESS__MSG);
}
}
int num = 0;
for (String memberId : successfulMembers) {
infoResult.addLine(CliStrings.format(CliStrings.DESTROY_INDEX__NUMBER__AND__MEMBER, ++num, memberId));
}
return ResultBuilder.buildResult(infoResult);
}

/**
 * Builds the error result listing, per failure message, the members on which
 * the index destroy failed.
 */
private Result buildDestroyIndexFailureResult(String indexName, Map<String, Set<String>> indexOpFailMap) {
ErrorResultData erd = ResultBuilder.createErrorResultData();
if (StringUtils.isNotBlank(indexName)) {
erd.addLine(CliStrings.format(CliStrings.DESTROY_INDEX__FAILURE__MSG, indexName));
} else {
erd.addLine("Indexes could not be destroyed for following reasons");
}
for (Map.Entry<String, Set<String>> entry : indexOpFailMap.entrySet()) {
erd.addLine(CliStrings.format(CliStrings.DESTROY_INDEX__REASON_MESSAGE, entry.getKey()));
erd.addLine(CliStrings.DESTROY_INDEX__EXCEPTION__OCCURRED__ON);
int num = 0;
for (String memberId : entry.getValue()) {
erd.addLine(CliStrings.format(CliStrings.DESTROY_INDEX__NUMBER__AND__MEMBER, ++num, memberId));
}
erd.addLine("");
}
return ResultBuilder.buildResult(erd);
}
Aggregations