Use of org.apache.geode.distributed.DistributedMember in project geode by apache.
Class MemberCommands, method listMember.
@CliCommand(value = {CliStrings.LIST_MEMBER}, help = CliStrings.LIST_MEMBER__HELP)
@CliMetaData(shellOnly = false, relatedTopic = CliStrings.TOPIC_GEODE_SERVER)
@ResourceOperation(resource = Resource.CLUSTER, operation = Operation.READ)
public Result listMember(
    @CliOption(key = {CliStrings.LIST_MEMBER__GROUP}, unspecifiedDefaultValue = "",
        optionContext = ConverterHint.MEMBERGROUP,
        help = CliStrings.LIST_MEMBER__GROUP__HELP) String group) {
  Result result = null;
  // TODO: Add the code for identifying the system services
  try {
    Set<DistributedMember> memberSet = new TreeSet<DistributedMember>();
    InternalCache cache = getCache();

    // default get all the members in the DS
    if (group.isEmpty()) {
      memberSet.addAll(CliUtil.getAllMembers(cache));
    } else {
      memberSet.addAll(cache.getDistributedSystem().getGroupMembers(group));
    }

    if (memberSet.isEmpty()) {
      result = ResultBuilder.createInfoResult(CliStrings.LIST_MEMBER__MSG__NO_MEMBER_FOUND);
    } else {
      TabularResultData resultData = ResultBuilder.createTabularResultData();
      Iterator<DistributedMember> memberIters = memberSet.iterator();
      while (memberIters.hasNext()) {
        DistributedMember member = memberIters.next();
        resultData.accumulate("Name", member.getName());
        resultData.accumulate("Id", member.getId());
      }
      result = ResultBuilder.buildResult(resultData);
    }
  } catch (Exception e) {
    result = ResultBuilder.createGemFireErrorResult(
        "Could not fetch the list of members. " + e.getMessage());
    LogWrapper.getInstance().warning(e.getMessage(), e);
  }
  return result;
}
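For context, the command above only reads two fields from each DistributedMember: getName() and getId(). A minimal, self-contained sketch of the same idea against the public API (outside the gfsh command framework) might look like the following; the member name "my-member" and the locator address are placeholder values.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.distributed.DistributedMember;

public class ListMembersSketch {
  public static void main(String[] args) {
    // Join the distributed system as a peer; property values are placeholders.
    Cache cache = new CacheFactory()
        .set("name", "my-member")
        .set("locators", "localhost[10334]")
        .create();

    // The local member is always available from the DistributedSystem.
    DistributedMember self = cache.getDistributedSystem().getDistributedMember();
    System.out.println(self.getName() + " -> " + self.getId());

    // Cache.getMembers() returns the other members that have created a cache,
    // which is roughly what the command tabulates via CliUtil.getAllMembers().
    for (DistributedMember member : cache.getMembers()) {
      System.out.println(member.getName() + " -> " + member.getId());
    }

    cache.close();
  }
}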
Use of org.apache.geode.distributed.DistributedMember in project geode by apache.
Class MemberCommands, method describeMember.
@CliCommand(value = {CliStrings.DESCRIBE_MEMBER}, help = CliStrings.DESCRIBE_MEMBER__HELP)
@CliMetaData(shellOnly = false, relatedTopic = CliStrings.TOPIC_GEODE_SERVER)
@ResourceOperation(resource = Resource.CLUSTER, operation = Operation.READ)
public Result describeMember(
    @CliOption(key = CliStrings.DESCRIBE_MEMBER__IDENTIFIER,
        optionContext = ConverterHint.ALL_MEMBER_IDNAME,
        help = CliStrings.DESCRIBE_MEMBER__HELP, mandatory = true) String memberNameOrId) {
  Result result = null;
  try {
    DistributedMember memberToBeDescribed =
        CliUtil.getDistributedMemberByNameOrId(memberNameOrId);
    if (memberToBeDescribed != null) {
      // This information should be available through the MBeans too. We might not need
      // the function.
      // Yes, but then the command is subject to MBean availability, which would be
      // affected once MBean filters are used.
      ResultCollector<?, ?> rc =
          CliUtil.executeFunction(getMemberInformation, null, memberToBeDescribed);
      ArrayList<?> output = (ArrayList<?>) rc.getResult();
      Object obj = output.get(0);
      if (obj != null && (obj instanceof MemberInformation)) {
        CompositeResultData crd = ResultBuilder.createCompositeResultData();
        MemberInformation memberInformation = (MemberInformation) obj;
        memberInformation.setName(memberToBeDescribed.getName());
        memberInformation.setId(memberToBeDescribed.getId());
        memberInformation.setHost(memberToBeDescribed.getHost());
        memberInformation.setProcessId("" + memberToBeDescribed.getProcessId());

        SectionResultData section = crd.addSection();
        section.addData("Name", memberInformation.getName());
        section.addData("Id", memberInformation.getId());
        section.addData("Host", memberInformation.getHost());
        section.addData("Regions",
            CliUtil.convertStringSetToString(memberInformation.getHostedRegions(), '\n'));
        section.addData("PID", memberInformation.getProcessId());
        section.addData("Groups", memberInformation.getGroups());
        section.addData("Used Heap", memberInformation.getHeapUsage() + "M");
        section.addData("Max Heap", memberInformation.getMaxHeapSize() + "M");

        String offHeapMemorySize = memberInformation.getOffHeapMemorySize();
        if (offHeapMemorySize != null && !offHeapMemorySize.isEmpty()) {
          section.addData("Off Heap Size", offHeapMemorySize);
        }

        section.addData("Working Dir", memberInformation.getWorkingDirPath());
        section.addData("Log file", memberInformation.getLogFilePath());
        section.addData("Locators", memberInformation.getLocators());

        if (memberInformation.isServer()) {
          SectionResultData clientServiceSection = crd.addSection();
          List<CacheServerInfo> csList = memberInformation.getCacheServeInfo();
          if (csList != null) {
            Iterator<CacheServerInfo> iters = csList.iterator();
            clientServiceSection.setHeader("Cache Server Information");
            while (iters.hasNext()) {
              CacheServerInfo cacheServerInfo = iters.next();
              clientServiceSection.addData("Server Bind", cacheServerInfo.getBindAddress());
              clientServiceSection.addData("Server Port", cacheServerInfo.getPort());
              clientServiceSection.addData("Running", cacheServerInfo.isRunning());
            }
            clientServiceSection.addData("Client Connections",
                memberInformation.getClientCount());
          }
        }
        result = ResultBuilder.buildResult(crd);
      } else {
        result = ResultBuilder.createInfoResult(CliStrings.format(
            CliStrings.DESCRIBE_MEMBER__MSG__INFO_FOR__0__COULD_NOT_BE_RETRIEVED,
            new Object[] {memberNameOrId}));
      }
    } else {
      result = ResultBuilder.createInfoResult(CliStrings.format(
          CliStrings.DESCRIBE_MEMBER__MSG__NOT_FOUND, new Object[] {memberNameOrId}));
    }
  } catch (CacheClosedException e) {
    // the cache is shutting down; nothing to report, so result stays null
  } catch (FunctionInvocationTargetException e) {
    result = ResultBuilder.createGemFireErrorResult(e.getMessage());
  } catch (Exception e) {
    result = ResultBuilder.createGemFireErrorResult(e.getMessage());
  }
  return result;
}
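Beyond the identity getters used above (getName, getId, getHost, getProcessId), the command gathers the remaining details by running a function on the target member. A sketch of that pattern with the public FunctionService API follows; "member-info-function" is a placeholder id for a function assumed to be registered on the target member.

import java.util.List;

import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.FunctionService;
import org.apache.geode.cache.execute.ResultCollector;
import org.apache.geode.distributed.DistributedMember;

public class DescribeMemberSketch {
  static void describe(DistributedMember member) {
    // Identity fields exposed directly by DistributedMember.
    System.out.println("Name: " + member.getName());
    System.out.println("Id:   " + member.getId());
    System.out.println("Host: " + member.getHost());
    System.out.println("PID:  " + member.getProcessId());

    // Target exactly this member with a function, the way the command does via
    // CliUtil.executeFunction(getMemberInformation, null, memberToBeDescribed).
    Execution execution = FunctionService.onMember(member);
    ResultCollector<?, ?> rc = execution.execute("member-info-function");
    List<?> results = (List<?>) rc.getResult();
    System.out.println("Function results: " + results);
  }
}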
Use of org.apache.geode.distributed.DistributedMember in project geode by apache.
Class MiscellaneousCommands, method gc.
@CliCommand(value = CliStrings.GC, help = CliStrings.GC__HELP)
@CliMetaData(relatedTopic = {CliStrings.TOPIC_GEODE_DEBUG_UTIL})
@ResourceOperation(resource = Resource.CLUSTER, operation = Operation.MANAGE)
public Result gc(
    @CliOption(key = CliStrings.GC__GROUP,
        unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
        help = CliStrings.GC__GROUP__HELP) String[] groups,
    @CliOption(key = CliStrings.GC__MEMBER, optionContext = ConverterHint.ALL_MEMBER_IDNAME,
        unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
        help = CliStrings.GC__MEMBER__HELP) String memberId) {
  InternalCache cache = getCache();
  Result result = null;
  CompositeResultData gcResultTable = ResultBuilder.createCompositeResultData();
  TabularResultData resultTable = gcResultTable.addSection().addTable("Table1");
  String headerText = "GC Summary";
  resultTable.setHeader(headerText);
  Set<DistributedMember> dsMembers = new HashSet<DistributedMember>();

  if (memberId != null && memberId.length() > 0) {
    DistributedMember member = CliUtil.getDistributedMemberByNameOrId(memberId);
    if (member == null) {
      return ResultBuilder.createGemFireErrorResult(
          memberId + CliStrings.GC__MSG__MEMBER_NOT_FOUND);
    }
    dsMembers.add(member);
    result = executeAndBuildResult(resultTable, dsMembers);
  } else if (groups != null && groups.length > 0) {
    for (String group : groups) {
      dsMembers.addAll(cache.getDistributedSystem().getGroupMembers(group));
    }
    result = executeAndBuildResult(resultTable, dsMembers);
  } else {
    // gc on entire cluster
    // exclude locators
    dsMembers = CliUtil.getAllNormalMembers(cache);
    result = executeAndBuildResult(resultTable, dsMembers);
  }
  return result;
}
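The only DistributedMember-related work in this command is resolving which members to target: one member by name or id, all members of the given groups, or every non-locator member. A rough sketch of that resolution using only public API (Cache.getMembers() and the getName()/getId()/getGroups() accessors on DistributedMember) is shown below; it is an approximation, since the command itself relies on CliUtil helpers such as getAllNormalMembers to exclude locators.

import java.util.HashSet;
import java.util.Set;

import org.apache.geode.cache.Cache;
import org.apache.geode.distributed.DistributedMember;

public class GcTargetSketch {
  // Resolve the target member set roughly the way the gc command does.
  static Set<DistributedMember> resolveTargets(Cache cache, String nameOrId, String group) {
    Set<DistributedMember> targets = new HashSet<>();
    for (DistributedMember member : cache.getMembers()) {
      if (nameOrId != null) {
        // a single member, matched by name or id
        if (nameOrId.equals(member.getName()) || nameOrId.equals(member.getId())) {
          targets.add(member);
        }
      } else if (group != null) {
        // every member that belongs to the requested group
        if (member.getGroups().contains(group)) {
          targets.add(member);
        }
      } else {
        // no filter: every cache member visible to this one
        targets.add(member);
      }
    }
    return targets;
  }
}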
Use of org.apache.geode.distributed.DistributedMember in project geode by apache.
Class DataCommands, method executeRebalanceOnDS.
private Result executeRebalanceOnDS(InternalCache cache, String simulate,
    String[] excludeRegionsList) {
  Result result = null;
  int index = 1;
  CompositeResultData rebalanceResultData = ResultBuilder.createCompositeResultData();
  List<String> listExcludedRegion = new ArrayList<>();
  if (excludeRegionsList != null) {
    Collections.addAll(listExcludedRegion, excludeRegionsList);
  }

  List<MemberPRInfo> listMemberRegion = getMemberRegionList(cache, listExcludedRegion);
  if (listMemberRegion.size() == 0) {
    return ResultBuilder.createInfoResult(CliStrings.REBALANCE__MSG__NO_REBALANCING_REGIONS_ON_DS);
  }

  Iterator<MemberPRInfo> iterator = listMemberRegion.iterator();
  boolean flagToContinueWithRebalance = false;
  // check whether the list has any members that can be rebalanced
  while (iterator.hasNext()) {
    if (iterator.next().dsMemberList.size() > 1) {
      flagToContinueWithRebalance = true;
      break;
    }
  }
  if (!flagToContinueWithRebalance) {
    return ResultBuilder.createInfoResult(CliStrings.REBALANCE__MSG__NO_REBALANCING_REGIONS_ON_DS);
  }

  for (MemberPRInfo memberPR : listMemberRegion) {
    try {
      // only rebalance if more than one member is associated with the region
      if (memberPR.dsMemberList.size() > 1) {
        for (int i = 0; i < memberPR.dsMemberList.size(); i++) {
          DistributedMember dsMember = memberPR.dsMemberList.get(i);
          Function rebalanceFunction = new RebalanceFunction();
          Object[] functionArgs = new Object[3];
          functionArgs[0] = simulate;
          Set<String> regionSet = new HashSet<>();
          regionSet.add(memberPR.region);
          functionArgs[1] = regionSet;
          Set<String> excludeRegionSet = new HashSet<>();
          functionArgs[2] = excludeRegionSet;
          List resultList = null;
          try {
            if (checkMemberPresence(dsMember, cache)) {
              resultList = (ArrayList) CliUtil
                  .executeFunction(rebalanceFunction, functionArgs, dsMember).getResult();
              if (checkResultList(rebalanceResultData, resultList, dsMember)) {
                result = ResultBuilder.buildResult(rebalanceResultData);
                continue;
              }
              List<String> rstList = tokenize((String) resultList.get(0), ",");
              result = ResultBuilder.buildResult(toCompositeResultData(rebalanceResultData,
                  (ArrayList) rstList, index, simulate.equals("true"), cache));
              index++;
              // rebalancing for this region is done, so break and continue with the next region
              break;
            } else {
              if (i == memberPR.dsMemberList.size() - 1) {
                rebalanceResultData.addSection().addData(
                    CliStrings.format(
                        CliStrings.REBALANCE__MSG__NO_EXECUTION_FOR_REGION_0_ON_MEMBERS_1,
                        memberPR.region, listOfAllMembers(memberPR.dsMemberList)),
                    CliStrings.REBALANCE__MSG__MEMBERS_MIGHT_BE_DEPARTED);
                result = ResultBuilder.buildResult(rebalanceResultData);
              } else {
                continue;
              }
            }
          } catch (Exception ex) {
            if (i == memberPR.dsMemberList.size() - 1) {
              rebalanceResultData.addSection().addData(
                  CliStrings.format(
                      CliStrings.REBALANCE__MSG__NO_EXECUTION_FOR_REGION_0_ON_MEMBERS_1,
                      memberPR.region, listOfAllMembers(memberPR.dsMemberList)),
                  CliStrings.REBALANCE__MSG__REASON + ex.getMessage());
              result = ResultBuilder.buildResult(rebalanceResultData);
            } else {
              continue;
            }
          }

          if (checkResultList(rebalanceResultData, resultList, dsMember)) {
            result = ResultBuilder.buildResult(rebalanceResultData);
            continue;
          }
          List<String> rstList = tokenize((String) resultList.get(0), ",");
          result = ResultBuilder.buildResult(toCompositeResultData(rebalanceResultData,
              (ArrayList) rstList, index, simulate.equals("true"), cache));
          index++;
        }
      }
    } catch (Exception e) {
      ErrorResultData errorResultData = ResultBuilder.createErrorResultData()
          .setErrorCode(ResultBuilder.ERRORCODE_DEFAULT).addLine(e.getMessage());
      return (ResultBuilder.buildResult(errorResultData));
    }
  }
  return result;
}
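The method above drives rebalancing through the internal RebalanceFunction so that it can pick one member per partitioned region. For comparison, here is a short sketch of the public-API route, the ResourceManager rebalance factory; the excluded region name "excludedRegion" is a placeholder.

import java.util.Collections;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.control.RebalanceFactory;
import org.apache.geode.cache.control.RebalanceOperation;
import org.apache.geode.cache.control.RebalanceResults;

public class RebalanceSketch {
  // Rebalance (or simulate a rebalance of) the partitioned regions hosted by this cache.
  static void rebalance(Cache cache, boolean simulate) throws InterruptedException {
    RebalanceFactory factory = cache.getResourceManager().createRebalanceFactory()
        .excludeRegions(Collections.singleton("excludedRegion"));

    RebalanceOperation op = simulate ? factory.simulate() : factory.start();
    RebalanceResults results = op.getResults(); // blocks until the operation finishes

    System.out.println("Buckets transferred: " + results.getTotalBucketTransfersCompleted());
    System.out.println("Bytes transferred:   " + results.getTotalBucketTransferBytes());
    System.out.println("Time (ms):           " + results.getTotalTimeInMilliseconds());
  }
}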
Use of org.apache.geode.distributed.DistributedMember in project geode by apache.
Class DiskStoreCommands, method compactDiskStore.
@CliCommand(value = CliStrings.COMPACT_DISK_STORE, help = CliStrings.COMPACT_DISK_STORE__HELP)
@CliMetaData(shellOnly = false, relatedTopic = {CliStrings.TOPIC_GEODE_DISKSTORE})
@ResourceOperation(resource = Resource.DATA, operation = Operation.MANAGE)
public Result compactDiskStore(
    @CliOption(key = CliStrings.COMPACT_DISK_STORE__NAME, mandatory = true,
        optionContext = ConverterHint.DISKSTORE,
        help = CliStrings.COMPACT_DISK_STORE__NAME__HELP) String diskStoreName,
    @CliOption(key = CliStrings.COMPACT_DISK_STORE__GROUP,
        unspecifiedDefaultValue = CliMetaData.ANNOTATION_NULL_VALUE,
        help = CliStrings.COMPACT_DISK_STORE__GROUP__HELP) String[] groups) {
  Result result = null;
  try {
    // validate that the disk store exists
    if (!diskStoreExists(diskStoreName)) {
      result = ResultBuilder.createUserErrorResult(CliStrings.format(
          CliStrings.COMPACT_DISK_STORE__DISKSTORE_0_DOESNOT_EXIST,
          new Object[] {diskStoreName}));
    } else {
      InternalDistributedSystem ds = getCache().getInternalDistributedSystem();
      Map<DistributedMember, PersistentID> overallCompactInfo =
          new HashMap<DistributedMember, PersistentID>();

      Set<?> otherMembers = ds.getDistributionManager().getOtherNormalDistributionManagerIds();
      Set<InternalDistributedMember> allMembers = new HashSet<InternalDistributedMember>();
      for (Object member : otherMembers) {
        allMembers.add((InternalDistributedMember) member);
      }
      allMembers.add(ds.getDistributedMember());
      otherMembers = null;

      String groupInfo = "";
      // if groups are specified, restrict to members in the specified groups
      if (groups != null && groups.length > 0) {
        groupInfo = CliStrings.format(CliStrings.COMPACT_DISK_STORE__MSG__FOR_GROUP,
            new Object[] {Arrays.toString(groups) + "."});
        final Set<InternalDistributedMember> selectedMembers =
            new HashSet<InternalDistributedMember>();
        List<String> targetedGroups = Arrays.asList(groups);
        for (Iterator<InternalDistributedMember> iterator = allMembers.iterator();
            iterator.hasNext();) {
          InternalDistributedMember member = iterator.next();
          List<String> memberGroups = member.getGroups();
          if (!Collections.disjoint(targetedGroups, memberGroups)) {
            selectedMembers.add(member);
          }
        }
        allMembers = selectedMembers;
      }

      // need at least one member
      if (allMembers.isEmpty()) {
        result = ResultBuilder.createUserErrorResult(CliStrings.format(
            CliStrings.COMPACT_DISK_STORE__NO_MEMBERS_FOUND_IN_SPECIFED_GROUP,
            new Object[] {Arrays.toString(groups)}));
      } else {
        // first invoke on the local member if it exists in the targeted set
        if (allMembers.remove(ds.getDistributedMember())) {
          PersistentID compactedDiskStoreId = CompactRequest.compactDiskStore(diskStoreName);
          if (compactedDiskStoreId != null) {
            overallCompactInfo.put(ds.getDistributedMember(), compactedDiskStoreId);
          }
        }

        // if members remain, send a CompactRequest to the others
        if (!allMembers.isEmpty()) {
          // invoke compact on all 'other' members
          Map<DistributedMember, PersistentID> memberCompactInfo =
              CompactRequest.send(ds.getDistributionManager(), diskStoreName, allMembers);
          if (memberCompactInfo != null && !memberCompactInfo.isEmpty()) {
            overallCompactInfo.putAll(memberCompactInfo);
            memberCompactInfo.clear();
          }
          String notExecutedMembers = CompactRequest.getNotExecutedMembers();
          LogWrapper.getInstance().info("compact disk-store \"" + diskStoreName
              + "\" message was scheduled to be sent to but was not sent to "
              + notExecutedMembers);
        }

        // if compaction happened at all, prepare the summary
        if (overallCompactInfo != null && !overallCompactInfo.isEmpty()) {
          CompositeResultData compositeResultData = ResultBuilder.createCompositeResultData();
          SectionResultData section = null;
          Set<Entry<DistributedMember, PersistentID>> entries = overallCompactInfo.entrySet();
          for (Entry<DistributedMember, PersistentID> entry : entries) {
            String memberId = entry.getKey().getId();
            section = compositeResultData.addSection(memberId);
            section.addData("On Member", memberId);
            PersistentID persistentID = entry.getValue();
            if (persistentID != null) {
              SectionResultData subSection = section.addSection("DiskStore" + memberId);
              subSection.addData("UUID", persistentID.getUUID());
              subSection.addData("Host", persistentID.getHost().getHostName());
              subSection.addData("Directory", persistentID.getDirectory());
            }
          }
          compositeResultData.setHeader("Compacted " + diskStoreName + groupInfo);
          result = ResultBuilder.buildResult(compositeResultData);
        } else {
          result = ResultBuilder.createInfoResult(
              CliStrings.COMPACT_DISK_STORE__COMPACTION_ATTEMPTED_BUT_NOTHING_TO_COMPACT);
        }
      } // end: at least one member
    } // end: disk store exists
  } catch (RuntimeException e) {
    LogWrapper.getInstance().info(e.getMessage(), e);
    result = ResultBuilder.createGemFireErrorResult(CliStrings.format(
        CliStrings.COMPACT_DISK_STORE__ERROR_WHILE_COMPACTING_REASON_0,
        new Object[] {e.getMessage()}));
  }
  return result;
}
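The command fans a CompactRequest out to the selected members (local member first, then the rest). On any single member, the same online compaction can be requested through the public DiskStore API; a minimal sketch follows, assuming the caller supplies the disk store name and the store was created with allow-force-compaction enabled.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.DiskStore;

public class CompactDiskStoreSketch {
  // Request online compaction of one disk store on the local member only.
  // forceCompaction() has an effect only if the disk store allows forced compaction;
  // it returns true if a compaction was actually performed.
  static void compactLocally(Cache cache, String diskStoreName) {
    DiskStore diskStore = cache.findDiskStore(diskStoreName);
    if (diskStore == null) {
      System.out.println("No disk store named " + diskStoreName + " on member "
          + cache.getDistributedSystem().getDistributedMember().getId());
      return;
    }
    boolean compacted = diskStore.forceCompaction();
    System.out.println("Compaction " + (compacted ? "performed" : "not performed"));
  }
}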