Use of com.emc.storageos.systemservices.impl.resource.util.NodeInfo in project coprhd-controller by CoprHD:
the class LogService, method getLogs.
/**
 * Get log data from the specified virtual machines that are filtered, merged,
 * and sorted based on the passed request parameters and streams the log
 * messages back to the client as JSON formatted strings.
 *
 * @brief Show logs from all or specified virtual machine
 * @param nodeIds The ids of the virtual machines for which log data is
 *            collected.
 *            Allowed values: standalone,
 *            control nodes: vipr1,vipr2 etc
 *            data services nodes: dataservice-10-111-111-222 (node-ip-address)
 * @param nodeNames The custom names of the vipr nodes for which log data is
 *            collected.
 *            Allowed values: Current values of node_x_name properties
 * @param logNames The names of the log files to process.
 * @param severity The minimum severity level for a logged message.
 *            Allowed values:0-9. Default value: 7
 * @param startTimeStr The start datetime of the desired time window. Value is
 *            inclusive.
 *            Allowed values: "yyyy-MM-dd_HH:mm:ss" formatted date or
 *            datetime in ms.
 *            Default: Set to yesterday same time
 * @param endTimeStr The end datetime of the desired time window. Value is
 *            inclusive.
 *            Allowed values: "yyyy-MM-dd_HH:mm:ss" formatted date or
 *            datetime in ms.
 * @param msgRegex A regular expression to which the log message conforms.
 * @param maxCount Maximum number of log messages to retrieve. This may return
 *            more than max count, if there are more messages with same
 *            timestamp as of the latest message.
 *            Value should be greater than 0.
 * @param dryRun if true, the API will do a dry run for log collection. Instead
 *            of collecting logs from nodes, dry run will check the nodes'
 *            availability for collecting logs. Entity body of the response
 *            will return an error message string indicating which node(s)
 *            not available for collecting logs. If log collection is ok
 *            for all specified nodes, no error message is included in
 *            response.
 *            Default value of this parameter is false.
 * @prereq none
 * @return A reference to the StreamingOutput to which the log data is
 *         written.
 * @throws WebApplicationException When an invalid request is made.
 */
@GET
@CheckPermission(roles = { Role.SYSTEM_ADMIN, Role.SYSTEM_MONITOR, Role.SECURITY_ADMIN })
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML, MediaType.TEXT_PLAIN })
public Response getLogs(@QueryParam(LogRequestParam.NODE_ID) List<String> nodeIds, @QueryParam(LogRequestParam.NODE_NAME) List<String> nodeNames, @QueryParam(LogRequestParam.LOG_NAME) List<String> logNames, @DefaultValue(LogSeverity.DEFAULT_VALUE_AS_STR) @QueryParam(LogRequestParam.SEVERITY) int severity, @QueryParam(LogRequestParam.START_TIME) String startTimeStr, @QueryParam(LogRequestParam.END_TIME) String endTimeStr, @QueryParam(LogRequestParam.MSG_REGEX) String msgRegex, @QueryParam(LogRequestParam.MAX_COUNT) int maxCount, @QueryParam(LogRequestParam.DRY_RUN) @DefaultValue("false") boolean dryRun) throws Exception {
    _log.info("Received getlogs request");
    enforceRunningRequestLimit();
    final MediaType mediaType = getMediaType();
    _log.info("Logs request media type {}", mediaType);
    // Resolve custom node names into canonical node ids before validation.
    nodeIds = _coordinatorClientExt.combineNodeNamesWithNodeIds(nodeNames, nodeIds);
    // Validate the passed node ids.
    validateNodeIds(nodeIds);
    _log.debug("Validated requested nodes");
    // Validate the passed severity is valid.
    validateLogSeverity(severity);
    _log.debug("Validated requested severity");
    // Validate the passed start and end times are valid.
    Date startTime = TimeUtils.getDateTimestamp(startTimeStr);
    Date endTime = TimeUtils.getDateTimestamp(endTimeStr);
    TimeUtils.validateTimestamps(startTime, endTime);
    _log.debug("Validated requested time window");
    // Default the start of the time window to 24 hours ago when omitted.
    if (startTime == null) {
        Calendar yesterday = Calendar.getInstance();
        yesterday.add(Calendar.DATE, -1);
        startTime = yesterday.getTime();
        _log.info("Setting start time to yesterday {} ", startTime);
    }
    // Validate regular message
    validateMsgRegex(msgRegex);
    _log.debug("Validated regex");
    // Validate max count. NOTE(review): the javadoc says the value should be
    // greater than 0, but only negative values are rejected here; 0 appears to
    // be accepted (presumably meaning "no limit") — confirm before tightening.
    if (maxCount < 0) {
        throw APIException.badRequests.parameterIsNotValid("maxCount");
    }
    // Validate log names: every requested name must be a known log file.
    Set<String> allLogNames = getValidLogNames();
    _log.debug("valid log names {}", allLogNames);
    for (String logName : logNames) {
        if (!allLogNames.contains(logName)) {
            throw APIException.badRequests.parameterIsNotValid("log names");
        }
    }
    // The same request descriptor serves both the dry run and the real
    // collection; build it once instead of duplicating the builder chain
    // (and, previously, rebuilding it on every iteration of the dry-run loop).
    LogRequest logReqInfo = new LogRequest.Builder().nodeIds(nodeIds).baseNames(getLogNamesFromAlias(logNames)).logLevel(severity).startTime(startTime).endTime(endTime).regex(msgRegex).maxCont(maxCount).build();
    if (dryRun) {
        List<NodeInfo> clusterNodesInfo = ClusterNodesUtil.getClusterNodeInfo();
        if (clusterNodesInfo.isEmpty()) {
            _log.error("No nodes available for collecting logs");
            throw APIException.internalServerErrors.noNodeAvailableError("no nodes available for collecting logs");
        }
        // Restrict the dry run to the requested nodes (all nodes when none given).
        List<NodeInfo> matchingNodes;
        if (nodeIds.isEmpty()) {
            matchingNodes = clusterNodesInfo;
        } else {
            matchingNodes = new ArrayList<NodeInfo>();
            for (NodeInfo node : clusterNodesInfo) {
                if (nodeIds.contains(node.getId())) {
                    matchingNodes.add(node);
                }
            }
        }
        // Seed the failure list with nodes the coordinator already reports as
        // unavailable; a standalone deployment has no cluster peers to query.
        List<String> failedNodes;
        if (matchingNodes.size() == 1 && matchingNodes.get(0).getId().equals("standalone")) {
            failedNodes = new ArrayList<String>();
        } else {
            failedNodes = _coordinatorClientExt.getUnavailableControllerNodes();
        }
        if (!nodeIds.isEmpty()) {
            failedNodes.retainAll(nodeIds);
        }
        // The request payload is identical for every node, so mark it as a dry
        // run once, outside the loop.
        logReqInfo.setDryRun(true);
        String baseNodeURL;
        SysClientFactory.SysClient sysClient;
        for (final NodeInfo node : matchingNodes) {
            baseNodeURL = String.format(SysClientFactory.BASE_URL_FORMAT, node.getIpAddress(), node.getPort());
            _log.debug("getting log names from node: " + baseNodeURL);
            sysClient = SysClientFactory.getSysClient(URI.create(baseNodeURL), _logSvcPropertiesLoader.getNodeLogCollectorTimeout() * 1000, _logSvcPropertiesLoader.getNodeLogConnectionTimeout() * 1000);
            try {
                sysClient.post(SysClientFactory.URI_NODE_LOGS, null, logReqInfo);
            } catch (Exception e) {
                _log.error("Exception accessing node {}: {}", baseNodeURL, e);
                // NOTE(review): a node that the coordinator already listed as
                // unavailable could be added twice here, which would skew the
                // all-nodes-failed size check below — confirm whether the two
                // sources can overlap and dedupe if so.
                failedNodes.add(node.getId());
            }
        }
        if (_coordinatorClientExt.getNodeCount() == failedNodes.size()) {
            throw APIException.internalServerErrors.noNodeAvailableError("All nodes are unavailable for collecting logs");
        }
        return Response.ok().build();
    }
    _log.info("log request info is {}", logReqInfo.toString());
    final LogNetworkStreamMerger logRequestMgr = new LogNetworkStreamMerger(logReqInfo, mediaType, _logSvcPropertiesLoader);
    // Stream the merged logs lazily so large result sets are never buffered in
    // memory; the running-request counter is balanced in a finally block so a
    // failed stream still releases its slot.
    StreamingOutput logMsgStream = new StreamingOutput() {

        @Override
        public void write(OutputStream outputStream) {
            try {
                runningRequests.incrementAndGet();
                logRequestMgr.streamLogs(outputStream);
            } finally {
                runningRequests.decrementAndGet();
            }
        }
    };
    return Response.ok(logMsgStream).build();
}
Use of com.emc.storageos.systemservices.impl.resource.util.NodeInfo in project coprhd-controller by CoprHD:
the class LogLevelManager, method process.
/**
 * Resolves the nodes targeted by the log-level request (all cluster nodes
 * when none are named), determines the expiration for the new level, and
 * propagates the requested log level to those nodes.
 *
 * @return the log levels reported back by the targeted nodes.
 * @throws APIException When an error occurs satisfying the log request.
 */
public LogLevels process() {
    List<NodeInfo> nodeInfo;
    int expirInMin;
    // Getting all nodes information
    if (_logReqInfo.getNodeIds().isEmpty()) {
        _log.debug("No nodes specified, assuming all nodes");
        nodeInfo = ClusterNodesUtil.getClusterNodeInfo();
    } else {
        nodeInfo = getClusterNodesWithIds(_logReqInfo.getNodeIds());
    }
    if (nodeInfo.isEmpty()) {
        throw APIException.internalServerErrors.noNodeAvailableError("update log levels");
    }
    // NOTE(review): assumes _logReqInfo is always a LogLevelRequest in this
    // manager; a ClassCastException here would indicate a wiring bug upstream.
    LogLevelRequest logLevelReq = (LogLevelRequest) _logReqInfo;
    if (logLevelReq.getExpirInMin() == null) {
        // Fall back to the configured default expiration when none was given.
        // (Also fixes the "asuming" typo in the original debug message.)
        _log.debug("No expiration specified, assuming default value");
        expirInMin = _propertiesLoader.getLogLevelExpiration();
    } else {
        expirInMin = logLevelReq.getExpirInMin();
    }
    return propagate(nodeInfo, _logReqInfo.getLogNames(), _logReqInfo.getSeverity(), expirInMin, logLevelReq.getScope());
}
Use of com.emc.storageos.systemservices.impl.resource.util.NodeInfo in project coprhd-controller by CoprHD:
the class LogNetworkStreamMerger, method getClusterNodesWithIds.
/**
 * Returns the cluster nodes whose ids appear in the given list, preserving
 * the order in which the cluster reports them.
 */
private List<NodeInfo> getClusterNodesWithIds(List<String> nodeIds) {
    final List<NodeInfo> selected = new ArrayList<NodeInfo>();
    // Walk the current cluster topology and keep only the requested nodes.
    for (final NodeInfo candidate : ClusterNodesUtil.getClusterNodeInfo()) {
        if (!nodeIds.contains(candidate.getId())) {
            continue;
        }
        selected.add(candidate);
    }
    return selected;
}
Use of com.emc.storageos.systemservices.impl.resource.util.NodeInfo in project coprhd-controller by CoprHD:
the class NodeInfoTest, method testGetIpAddress.
/**
 * Tests that the getIpAddress method returns the host portion of the
 * endpoint the NodeInfo was constructed with.
 *
 * @throws Exception if NodeInfo construction fails; JUnit reports the
 *             failure with the full stack trace.
 */
@Test
public void testGetIpAddress() throws Exception {
    // Let any unexpected exception propagate instead of the old boolean-flag
    // try/catch, which swallowed the exception and hid the failure cause.
    NodeInfo nodeInfo = new NodeInfo(TEST_ID, TEST_NAME, TEST_ENDPOIT);
    // JUnit convention: expected value first, actual second, so failure
    // messages read correctly.
    Assert.assertEquals(TEST_HOST, nodeInfo.getIpAddress());
}
Use of com.emc.storageos.systemservices.impl.resource.util.NodeInfo in project coprhd-controller by CoprHD:
the class NodeInfoTest, method testGetName.
/**
 * Tests that the getName method returns the name the NodeInfo was
 * constructed with.
 *
 * @throws Exception if NodeInfo construction fails; JUnit reports the
 *             failure with the full stack trace.
 */
@Test
public void testGetName() throws Exception {
    // Let any unexpected exception propagate instead of the old boolean-flag
    // try/catch, which swallowed the exception and hid the failure cause.
    NodeInfo nodeInfo = new NodeInfo(TEST_ID, TEST_NAME, TEST_ENDPOIT);
    // JUnit convention: expected value first, actual second, so failure
    // messages read correctly.
    Assert.assertEquals(TEST_NAME, nodeInfo.getName());
}
Aggregations