use of com.emc.storageos.systemservices.impl.resource.util.NodeInfo in project coprhd-controller by CoprHD.
In the class BackupService, the method collectData:
public void collectData(BackupFileSet files, OutputStream outStream) throws IOException {
    ZipOutputStream zos = new ZipOutputStream(outStream);
    zos.setLevel(Deflater.BEST_SPEED);
    List<String> uniqueNodes = new ArrayList<String>();
    uniqueNodes.addAll(files.uniqueNodes());
    List<NodeInfo> nodes = ClusterNodesUtil.getClusterNodeInfo(uniqueNodes);
    if (nodes.size() < uniqueNodes.size()) {
        log.info("Only {}/{} nodes available for the backup, cannot download.", nodes.size(), uniqueNodes.size());
        return;
    }
    Collections.sort(nodes, new Comparator<NodeInfo>() {
        @Override
        public int compare(NodeInfo o1, NodeInfo o2) {
            return o1.getId().compareTo(o2.getId());
        }
    });
    URI postUri = SysClientFactory.URI_NODE_BACKUPS_DOWNLOAD;
    boolean propertiesFileFound = false;
    int collectFileCount = 0;
    int totalFileCount = files.size() * 2;
    String backupTag = files.first().tag;
    // Add the *_info.properties file first; the first live node that has it is used.
    for (final NodeInfo node : nodes) {
        String baseNodeURL = String.format(SysClientFactory.BASE_URL_FORMAT, node.getIpAddress(), node.getPort());
        SysClientFactory.SysClient sysClient = SysClientFactory.getSysClient(URI.create(baseNodeURL));
        try {
            String fileName = backupTag + BackupConstants.BACKUP_INFO_SUFFIX;
            String fullFileName = backupTag + File.separator + fileName;
            InputStream in = sysClient.post(postUri, InputStream.class, fullFileName);
            newZipEntry(zos, in, fileName);
            propertiesFileFound = true;
            break;
        } catch (Exception ex) {
            log.info("info.properties file is not found on node {}, exception {}", node.getId(), ex.getMessage());
        }
    }
    if (!propertiesFileFound) {
        throw new FileNotFoundException(String.format("No live node contains %s%s", backupTag, BackupConstants.BACKUP_INFO_SUFFIX));
    }
    for (final NodeInfo node : nodes) {
        String baseNodeURL = String.format(SysClientFactory.BASE_URL_FORMAT, node.getIpAddress(), node.getPort());
        SysClientFactory.SysClient sysClient = SysClientFactory.getSysClient(URI.create(baseNodeURL));
        for (String fileName : getFileNameList(files.subsetOf(null, null, node.getId()))) {
            // Multiply before dividing so the integer progress value is not truncated to 0.
            int progress = collectFileCount * 100 / totalFileCount;
            backupScheduler.getUploadExecutor().setUploadStatus(null, Status.IN_PROGRESS, progress, null);
            String fullFileName = backupTag + File.separator + fileName;
            InputStream in = sysClient.post(postUri, InputStream.class, fullFileName);
            newZipEntry(zos, in, fileName);
            collectFileCount++;
        }
    }
    // Only close the ZIP stream when everything succeeded; otherwise the package would be extractable but missing files.
    zos.close();
    log.info("Successfully generated ZIP package");
}
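collectData streams each file it fetches from a node into the ZIP package through a newZipEntry helper that is not shown in this snippet. A minimal sketch of what such a helper could look like, assuming it simply copies the node's response body into a new ZIP entry (the buffer size and stream handling here are illustrative, not taken from the project):

// Illustrative sketch only; the real newZipEntry in BackupService may differ.
// Requires java.util.zip.ZipEntry, java.util.zip.ZipOutputStream and java.io imports.
private void newZipEntry(ZipOutputStream zos, InputStream in, String entryName) throws IOException {
    zos.putNextEntry(new ZipEntry(entryName));
    try {
        byte[] buffer = new byte[8192];
        int len;
        while ((len = in.read(buffer)) != -1) {
            zos.write(buffer, 0, len);
        }
    } finally {
        in.close();        // release the HTTP response stream from the remote node
        zos.closeEntry();  // finish this entry; the ZipOutputStream itself stays open for the next file
    }
}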
use of com.emc.storageos.systemservices.impl.resource.util.NodeInfo in project coprhd-controller by CoprHD.
In the class HealthMonitorService, the method getStats:
/**
 * Get statistics for the virtual machines and their active services.
 * Virtual machine stats include memory usage, I/O for each device,
 * and load average numbers.
 * Service stats include service memory usage, the command that invoked it,
 * file descriptor count, and other stats (uptime, start time, thread count).
 * <p/>
 * If an interval value is passed, differential disk stats are returned: the difference between the first report
 * (stats for the time since system startup) and a second report (stats collected during the interval since the
 * first report).
 *
 * @brief Show disk, memory, and service statistics of all virtual machines
 * @param nodeIds node ids for which stats are collected.
 * @param nodeNames node names for which stats are collected.
 * @param interval amount of time in seconds over which differential stats are collected.
 * @prereq none
 * @return Stats response
 */
@GET
@Path("/stats")
@CheckPermission(roles = { Role.SYSTEM_ADMIN, Role.SYSTEM_MONITOR, Role.SECURITY_ADMIN })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
public StatsRestRep getStats(@QueryParam("node_id") List<String> nodeIds, @QueryParam("interval") int interval,
        @QueryParam("node_name") List<String> nodeNames) {
    nodeIds = _coordinatorClientExt.combineNodeNamesWithNodeIds(nodeNames, nodeIds);
    _log.info("Retrieving stats for nodes. Requested node ids: {}", nodeIds);
    StatsRestRep statsRestRep = new StatsRestRep();
    List<NodeInfo> nodeInfoList = ClusterNodesUtil.getClusterNodeInfo(nodeIds);
    // Validate 'interval'
    if (interval < 0) {
        throw APIException.badRequests.parameterIsNotValid("interval");
    }
    RequestParams requestParams = new RequestParams(interval);
    Map<String, NodeStats> nodesData = NodeDataCollector.getDataFromNodes(nodeInfoList, INTERNAL_NODE_STATS_URI,
            Action.POST, requestParams, NodeStats.class, null);
    statsRestRep.getNodeStatsList().addAll(nodesData.values());
    return statsRestRep;
}
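For context, a hedged sketch of how a REST client might call this endpoint. The base URL, port, and "/monitor" path prefix are assumptions, and TLS trust plus authentication setup are omitted; only the query parameters and the StatsRestRep accessor come from the snippet above.

// Hypothetical caller using the Jersey 1.x client; URL, port and path prefix are assumptions.
// The StatsRestRep import is omitted here; it lives in the CoprHD model packages.
import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.WebResource;
import javax.ws.rs.core.MediaType;

public class StatsClientExample {
    public static void main(String[] args) {
        Client client = Client.create();
        WebResource resource = client.resource("https://coprhd-vip:4443/monitor/stats");
        // Request differential disk stats over a 10-second interval for two specific nodes.
        StatsRestRep stats = resource
                .queryParam("node_id", "vipr1")
                .queryParam("node_id", "vipr2")
                .queryParam("interval", "10")
                .accept(MediaType.APPLICATION_JSON)
                .get(StatsRestRep.class);
        System.out.println("Collected stats for " + stats.getNodeStatsList().size() + " nodes");
    }
}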
use of com.emc.storageos.systemservices.impl.resource.util.NodeInfo in project coprhd-controller by CoprHD.
In the class LogService, the method validateNodeIds:
/**
 * Validates that the passed list specifies valid Bourne node Ids. Note that
 * an empty list is perfectly valid and means the service will process all
 * Bourne nodes in the cluster.
 *
 * @param nodeIds A list of the node ids for the Bourne nodes from which the
 *            logs are to be collected.
 * @throws APIException if the list contains an invalid node id.
 */
private void validateNodeIds(List<String> nodeIds) {
    // Verify that there is a cluster node with each of the requested ids.
    // An empty or null list is valid and means all cluster nodes.
    if (nodeIds == null || nodeIds.isEmpty()) {
        return;
    }
    List<NodeInfo> nodeInfoList = ClusterNodesUtil.getClusterNodeInfo();
    List<String> validNodeIds = new ArrayList<String>(nodeInfoList.size());
    for (NodeInfo node : nodeInfoList) {
        validNodeIds.add(node.getId());
    }
    // Any requested id that is not a known cluster node id is invalid.
    List<String> nodeIdsClone = new ArrayList<String>(nodeIds);
    nodeIdsClone.removeAll(validNodeIds);
    if (!nodeIdsClone.isEmpty()) {
        throw APIException.badRequests.parameterIsNotValid("node id");
    }
}
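For readers on Java 8 or later, the same check can be written more compactly with streams. This is an equivalent sketch, not the project's implementation; the method name is invented for illustration.

// Equivalent validation sketch using Java 8 streams (not the project's code).
// Requires java.util.Set and java.util.stream.Collectors imports.
private void validateNodeIdsStreamSketch(List<String> nodeIds) {
    if (nodeIds == null || nodeIds.isEmpty()) {
        return; // an empty list means "all nodes" and is valid
    }
    Set<String> validNodeIds = ClusterNodesUtil.getClusterNodeInfo().stream()
            .map(NodeInfo::getId)
            .collect(Collectors.toSet());
    if (!validNodeIds.containsAll(nodeIds)) {
        throw APIException.badRequests.parameterIsNotValid("node id");
    }
}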
use of com.emc.storageos.systemservices.impl.resource.util.NodeInfo in project coprhd-controller by CoprHD.
In the class BaseLogManager, the method getClusterNodesWithIds:
/**
 * Gets a reference to the node info for the nodes in the Bourne cluster
 * with the passed node identifiers.
 *
 * @param nodeIds The ids of the desired cluster nodes.
 * @return A list containing the connection info for the desired nodes.
 * @throws APIException When an exception occurs trying to get the
 *             cluster nodes.
 */
protected List<NodeInfo> getClusterNodesWithIds(List<String> nodeIds) {
    List<NodeInfo> matchingNodes = new ArrayList<NodeInfo>();
    List<NodeInfo> nodeInfoList = ClusterNodesUtil.getClusterNodeInfo();
    for (NodeInfo node : nodeInfoList) {
        if (nodeIds.contains(node.getId())) {
            matchingNodes.add(node);
        }
    }
    return matchingNodes;
}
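A hedged usage sketch: a caller could compare the returned list against the requested ids to notice nodes that are currently unreachable. The surrounding method name, log message, and handling policy are assumptions for illustration; "_log" stands for the class's logger.

// Illustrative caller; the warning message and handling policy are assumptions.
protected List<NodeInfo> getAvailableNodesOrWarn(List<String> requestedNodeIds) {
    List<NodeInfo> matchingNodes = getClusterNodesWithIds(requestedNodeIds);
    if (matchingNodes.size() < requestedNodeIds.size()) {
        _log.warn("Only {} of {} requested nodes are reachable; data from missing nodes will be skipped.",
                matchingNodes.size(), requestedNodeIds.size());
    }
    return matchingNodes;
}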
use of com.emc.storageos.systemservices.impl.resource.util.NodeInfo in project coprhd-controller by CoprHD.
In the class LogLevelManager, the method propagate:
// Build the internal log-level URI for each node and call it using the system client.
// Collect responses from all nodes; if a node does not respond, log an error and continue.
private LogLevels propagate(List<NodeInfo> nodeInfos, List<String> logNames, LogSeverity severity,
        int expirInMin, String scope) {
    LogLevels nodeLogLevels = new LogLevels();
    for (final NodeInfo node : nodeInfos) {
        String baseNodeURL = String.format(BASE_URL_FORMAT, node.getIpAddress(), node.getPort());
        _log.debug("processing node: " + baseNodeURL);
        SysClientFactory.SysClient sysClient = SysClientFactory.getSysClient(URI.create(baseNodeURL),
                _propertiesLoader.getNodeLogCollectorTimeout() * 1000,
                _propertiesLoader.getNodeLogConnectionTimeout() * 1000);
        try {
            LogLevelRequest nodeLogReqInfo = new LogLevelRequest(new ArrayList<String>() {
                {
                    add(node.getId());
                }
            }, logNames, severity, expirInMin, scope);
            LogLevels nodeResp = sysClient.post(SysClientFactory.URI_LOG_LEVELS, LogLevels.class, nodeLogReqInfo);
            nodeLogLevels.getLogLevels().addAll(nodeResp.getLogLevels());
        } catch (Exception e) {
            _log.error("Exception accessing node {}:", baseNodeURL, e);
        }
    }
    return nodeLogLevels;
}