Use of org.apache.xmlrpc.XmlRpcException in the project maven-plugins by Apache.
From the class TracDownloader, method getIssueList:
/**
 * Queries the Trac XML-RPC interface and converts each matching ticket into
 * an {@link Issue}.
 *
 * @return the issues matching the configured query, never <code>null</code>
 * @throws MalformedURLException if the configured Trac URL is invalid
 * @throws XmlRpcException if the XML-RPC query or a ticket fetch fails
 */
public List<Issue> getIssueList() throws MalformedURLException, XmlRpcException {
    // Create and configure an XML-RPC client
    XmlRpcClientConfigImpl config = new XmlRpcClientConfigImpl();
    try {
        config.setServerURL(new URL(getUrl() + "/login/xmlrpc"));
    } catch (MalformedURLException e) {
        // Preserve the original exception as the cause so the bad URL can be diagnosed
        MalformedURLException mue = new MalformedURLException("The Trac URL is incorrect.");
        mue.initCause(e);
        throw mue;
    }
    config.setBasicUserName(tracUser);
    config.setBasicPassword(tracPassword);
    XmlRpcClient client = new XmlRpcClient();
    client.setConfig(config);
    client.setTransportFactory(new XmlRpcCommonsTransportFactory(client));
    // Fetch issues; an empty query string asks Trac for the default ticket set
    String qstr = StringUtils.isEmpty(query) ? "" : query;
    List<Issue> issueList = new ArrayList<Issue>();
    try {
        // ticket.query returns the ids of all matching tickets
        Object[] queryResult = (Object[]) client.execute("ticket.query", new Object[] { qstr });
        for (Object ticketId : queryResult) {
            Object[] ticketGetResult = (Object[]) client.execute("ticket.get", new Object[] { ticketId });
            issueList.add(createIssue(ticketGetResult));
        }
    } catch (XmlRpcException e) {
        throw new XmlRpcException("XmlRpc Error.", e);
    }
    return issueList;
}
Use of org.apache.xmlrpc.XmlRpcException in the project maven-plugins by Apache.
From the class TracMojo, method executeReport:
/**
 * Generates the Trac issues report for the given locale.
 *
 * @param locale the locale used to resolve the report resource bundle
 * @throws MavenReportException when all configured column names are invalid,
 *         the Trac URL is malformed, or the XML-RPC query fails
 */
public void executeReport(Locale locale) throws MavenReportException {
    // Validate parameters
    List<Integer> columnIds = IssuesReportHelper.getColumnIds(columnNames, TRAC_COLUMNS, DEPRECATED_TRAC_COLUMNS, getLog());
    if (columnIds.isEmpty()) {
        // This can happen if the user has configured column names and they are all invalid
        throw new MavenReportException("maven-changes-plugin: None of the configured columnNames '" + columnNames + "' are valid.");
    }
    try {
        // Download issues
        TracDownloader issueDownloader = new TracDownloader();
        configureIssueDownloader(issueDownloader);
        List<Issue> issueList = issueDownloader.getIssueList();
        // Generate the report
        IssuesReportGenerator report = new IssuesReportGenerator(IssuesReportHelper.toIntArray(columnIds));
        if (issueList.isEmpty()) {
            report.doGenerateEmptyReport(getBundle(locale), getSink());
            getLog().warn("No ticket has matched.");
        } else {
            report.doGenerateReport(getBundle(locale), getSink(), issueList);
        }
    } catch (MalformedURLException e) {
        // Rethrow this error so that the build fails, keeping the cause for diagnosis
        throw new MavenReportException("The Trac URL is incorrect.", e);
    } catch (XmlRpcException e) {
        // Rethrow this error so that the build fails
        throw new MavenReportException("XmlRpc Error.", e);
    } catch (Exception e) {
        // Log through the plugin logger instead of printStackTrace() so the
        // failure is visible in the build output
        getLog().error("Could not generate the Trac report.", e);
    }
}
Use of org.apache.xmlrpc.XmlRpcException in the project cloudstack by Apache.
From the class Ovm3StoragePool, method createRepo:
/**
 * Create primary storage, which is a repository in OVM. Pooling is part of
 * this too and clustering should be in the future.
 *
 * @param cmd the storage filer describing the primary storage to create;
 *        only {@code StoragePoolType.NetworkFilesystem} is supported
 * @return true when the repository is usable (including the systemvm.iso
 *         preparation), false when creation or preparation failed
 * @throws XmlRpcException when an agent call fails at the XML-RPC layer
 */
private boolean createRepo(StorageFilerTO cmd) throws XmlRpcException {
    String basePath = config.getAgentOvmRepoPath();
    Repository repo = new Repository(c);
    // OVM repository names are the pool uuid with the dashes removed
    String primUuid = repo.deDash(cmd.getUuid());
    String ovsRepo = basePath + "/" + primUuid;
    /* should add port ? */
    String mountPoint = String.format("%1$s:%2$s", cmd.getHost(), cmd.getPath());
    String msg;
    if (cmd.getType() == StoragePoolType.NetworkFilesystem) {
        Boolean repoExists = false;
        /* base repo first */
        try {
            repo.mountRepoFs(mountPoint, ovsRepo);
        } catch (Ovm3ResourceException e) {
            // Non-fatal: the mount may already exist; addRepo/createRepo decide below
            LOGGER.debug("Unable to mount NFS repository " + mountPoint + " on " + ovsRepo + " requested for " + config.getAgentHostname() + ": " + e.getMessage());
        }
        try {
            repo.addRepo(mountPoint, ovsRepo);
            repoExists = true;
        } catch (Ovm3ResourceException e) {
            LOGGER.debug("NFS repository " + mountPoint + " on " + ovsRepo + " not found creating repo: " + e.getMessage());
        }
        if (!repoExists) {
            try {
                /*
                 * a mount of the NFS fs by the createrepo actually
                 * generates a null if it is already mounted... -sigh-
                 */
                repo.createRepo(mountPoint, ovsRepo, primUuid, "OVS Repository");
            } catch (Ovm3ResourceException e) {
                msg = "NFS repository " + mountPoint + " on " + ovsRepo + " create failed!";
                LOGGER.debug(msg);
                throw new CloudRuntimeException(msg + " " + e.getMessage(), e);
            }
        }
        /* add base pooling first */
        if (config.getAgentInOvm3Pool()) {
            try {
                msg = "Configuring " + config.getAgentHostname() + "(" + config.getAgentIp() + ") for pool";
                LOGGER.debug(msg);
                setupPool(cmd);
                msg = "Configured host for pool";
                /* add clustering after pooling */
                if (config.getAgentInOvm3Cluster()) {
                    msg = "Setup " + config.getAgentHostname() + "(" + config.getAgentIp() + ") for cluster";
                    LOGGER.debug(msg);
                    /* setup cluster */
                    /*
                     * From cluster.java
                     * configure_server_for_cluster(cluster conf, fs, mount,
                     * fsuuid, poolfsbaseuuid)
                     */
                    /* create_cluster(poolfsuuid,) */
                }
            } catch (Ovm3ResourceException e) {
                msg = "Unable to setup pool on " + config.getAgentHostname() + "(" + config.getAgentIp() + ") for " + ovsRepo;
                throw new CloudRuntimeException(msg + " " + e.getMessage(), e);
            }
        } else {
            msg = "Host " + config.getAgentHostname() + " is not in an OVM3 pool, skipping pool setup";
            LOGGER.debug(msg);
        }
        /*
         * this is to create the .generic_fs_stamp else we're not allowed to
         * create any data\disks on this thing
         */
        try {
            URI uri = new URI(cmd.getType() + "://" + cmd.getHost() + ":" + cmd.getPort() + cmd.getPath() + "/VirtualMachines");
            setupNfsStorage(uri, cmd.getUuid());
        } catch (Exception e) {
            msg = "NFS mount " + mountPoint + " on " + config.getAgentSecStoragePath() + "/" + cmd.getUuid() + " create failed!";
            throw new CloudRuntimeException(msg + " " + e.getMessage(), e);
        }
    } else {
        msg = "NFS repository " + mountPoint + " on " + ovsRepo + " create failed, was type " + cmd.getType();
        LOGGER.debug(msg);
        return false;
    }
    try {
        /* systemvm iso is imported here */
        prepareSecondaryStorageStore(ovsRepo, cmd.getUuid(), cmd.getHost());
    } catch (Exception e) {
        msg = "systemvm.iso copy failed to " + ovsRepo;
        LOGGER.debug(msg, e);
        return false;
    }
    return true;
}
Use of org.apache.xmlrpc.XmlRpcException in the project cloudstack by Apache.
From the class CitrixResourceBase, method getVMSnapshotChainSize:
/**
 * Calculates the total physical utilisation (in bytes) of the snapshot chain
 * belonging to the given volume: each snapshot VDI plus its vhd-parent, and,
 * for ROOT volumes, the suspend (memory) VDIs of matching VM snapshots.
 *
 * @param conn the XenServer connection
 * @param volumeTo the volume whose snapshot chain is measured; its name may
 *        be refreshed from the live VDI label for DATADISK volumes
 * @param vmName the VM name used to match memory-snapshot VM records
 * @return the accumulated size; VDIs that cannot be inspected are skipped
 * @throws BadServerResponse, XenAPIException, XmlRpcException on XenAPI failures
 *         outside the per-VDI/per-VM best-effort loops
 */
public long getVMSnapshotChainSize(final Connection conn, final VolumeObjectTO volumeTo, final String vmName) throws BadServerResponse, XenAPIException, XmlRpcException {
    // For data disks, prefer the live VDI name label over the name in volumeTo
    if (volumeTo.getVolumeType() == Volume.Type.DATADISK) {
        final VDI dataDisk = VDI.getByUuid(conn, volumeTo.getPath());
        if (dataDisk != null) {
            final String dataDiskName = dataDisk.getNameLabel(conn);
            if (dataDiskName != null && !dataDiskName.isEmpty()) {
                volumeTo.setName(dataDiskName);
            }
        }
    }
    final Set<VDI> allvolumeVDIs = VDI.getByNameLabel(conn, volumeTo.getName());
    long size = 0;
    for (final VDI vdi : allvolumeVDIs) {
        try {
            if (vdi.getIsASnapshot(conn) && vdi.getSmConfig(conn).get("vhd-parent") != null) {
                final String parentUuid = vdi.getSmConfig(conn).get("vhd-parent");
                final VDI parentVDI = VDI.getByUuid(conn, parentUuid);
                // add size of snapshot vdi node, usually this only contains
                // meta data
                size = size + vdi.getPhysicalUtilisation(conn);
                // add size of snapshot vdi parent, this contains data
                if (!isRefNull(parentVDI)) {
                    size = size + parentVDI.getPhysicalUtilisation(conn).longValue();
                }
            }
        } catch (final Exception e) {
            // Best effort: skip VDIs we cannot inspect, but keep the stack trace
            s_logger.debug("Exception occurs when calculate snapshot capacity for volumes: due to " + e.toString(), e);
        }
    }
    if (volumeTo.getVolumeType() == Volume.Type.ROOT) {
        final Map<VM, VM.Record> allVMs = VM.getAllRecords(conn);
        // add size of memory snapshot vdi
        if (allVMs != null && allVMs.size() > 0) {
            for (final VM vmr : allVMs.keySet()) {
                try {
                    final String vName = vmr.getNameLabel(conn);
                    if (vName != null && vName.contains(vmName) && vmr.getIsASnapshot(conn)) {
                        final VDI memoryVDI = vmr.getSuspendVDI(conn);
                        if (!isRefNull(memoryVDI)) {
                            size = size + memoryVDI.getPhysicalUtilisation(conn);
                            final VDI pMemoryVDI = memoryVDI.getParent(conn);
                            if (!isRefNull(pMemoryVDI)) {
                                size = size + pMemoryVDI.getPhysicalUtilisation(conn);
                            }
                        }
                    }
                } catch (final Exception e) {
                    // Best effort: skip VM records we cannot inspect, keep the stack trace
                    s_logger.debug("Exception occurs when calculate snapshot capacity for memory: due to " + e.toString(), e);
                }
            }
        }
    }
    return size;
}
Use of org.apache.xmlrpc.XmlRpcException in the project cloudstack by Apache.
From the class XcpServerDiscoverer, method find:
/**
 * Discovers XenServer hosts for the given cluster by connecting to the host
 * named in the http URL, validating that the host's XS pool matches the
 * cluster's guid, and building one configured CitrixResourceBase per pool
 * member that is not already registered.
 *
 * @param dcId the data center (zone) id
 * @param podId the pod id; must not be null
 * @param clusterId the cluster id; must not be null and must reference a
 *        XenServer cluster
 * @param url the discovery URL; only the "http" scheme is handled here
 * @param username user name used to authenticate against the host
 * @param password password used to authenticate against the host
 * @param hostTags host tags (not read in this method body)
 * @return the discovered resources mapped to their detail maps, or null when
 *         this discoverer does not apply or discovery fails non-fatally
 * @throws DiscoveryException when no connection can be obtained or the host
 *         belongs to a different XS pool than the cluster
 */
@Override
public Map<? extends ServerResource, Map<String, String>> find(long dcId, Long podId, Long clusterId, URI url, String username, String password, List<String> hostTags) throws DiscoveryException {
Map<CitrixResourceBase, Map<String, String>> resources = new HashMap<CitrixResourceBase, Map<String, String>>();
Connection conn = null;
// Only plain http URLs are handled by this discoverer; anything else is
// left for other discoverers (null return = "not mine")
if (!url.getScheme().equals("http")) {
String msg = "urlString is not http so we're not taking care of the discovery for this: " + url;
s_logger.debug(msg);
return null;
}
if (clusterId == null) {
String msg = "must specify cluster Id when add host";
s_logger.debug(msg);
throw new RuntimeException(msg);
}
if (podId == null) {
String msg = "must specify pod Id when add host";
s_logger.debug(msg);
throw new RuntimeException(msg);
}
ClusterVO cluster = _clusterDao.findById(clusterId);
if (cluster == null || cluster.getHypervisorType() != HypervisorType.XenServer) {
if (s_logger.isInfoEnabled())
s_logger.info("invalid cluster id or cluster is not for XenServer hypervisors");
return null;
}
try {
// Resolve the host name and open a XenAPI connection to it
String hostname = url.getHost();
InetAddress ia = InetAddress.getByName(hostname);
String hostIp = ia.getHostAddress();
Queue<String> pass = new LinkedList<String>();
pass.add(password);
conn = _connPool.getConnect(hostIp, username, pass);
if (conn == null) {
String msg = "Unable to get a connection to " + url;
s_logger.debug(msg);
throw new DiscoveryException(msg);
}
// The connected host's pool identifies the cluster on the XS side
Set<Pool> pools = Pool.getAll(conn);
Pool pool = pools.iterator().next();
Pool.Record pr = pool.getRecord(conn);
String poolUuid = pr.uuid;
Map<Host, Host.Record> hosts = Host.getAllRecords(conn);
// Probe for known 6.2 hotfixes; ESP1004 takes precedence over ESP1
String latestHotFix = "";
if (poolHasHotFix(conn, hostIp, XenserverConfigs.XSHotFix62ESP1004)) {
latestHotFix = XenserverConfigs.XSHotFix62ESP1004;
} else if (poolHasHotFix(conn, hostIp, XenserverConfigs.XSHotFix62ESP1)) {
latestHotFix = XenserverConfigs.XSHotFix62ESP1;
}
/*set cluster hypervisor type to xenserver*/
// A cluster with existing hosts must keep matching the pool uuid;
// an empty or fresh cluster adopts the pool uuid as its guid
ClusterVO clu = _clusterDao.findById(clusterId);
if (clu.getGuid() == null) {
setClusterGuid(clu, poolUuid);
} else {
List<HostVO> clusterHosts = _resourceMgr.listAllHostsInCluster(clusterId);
if (clusterHosts != null && clusterHosts.size() > 0) {
if (!clu.getGuid().equals(poolUuid)) {
String msg = "Please join the host " + hostIp + " to XS pool " + clu.getGuid() + " through XC/XS before adding it through CS UI";
s_logger.warn(msg);
throw new DiscoveryException(msg);
}
} else {
setClusterGuid(clu, poolUuid);
}
}
// can not use this conn after this point, because this host may join a pool, this conn is retired
if (conn != null) {
try {
Session.logout(conn);
} catch (Exception e) {
s_logger.debug("Caught exception during logout", e);
}
conn.dispose();
conn = null;
}
poolUuid = clu.getGuid();
_clusterDao.update(clusterId, clu);
// Optionally reject hosts without hvm capability before configuring any
if (_checkHvm) {
for (Map.Entry<Host, Host.Record> entry : hosts.entrySet()) {
Host.Record record = entry.getValue();
boolean support_hvm = false;
for (String capability : record.capabilities) {
if (capability.contains("hvm")) {
support_hvm = true;
break;
}
}
if (!support_hvm) {
String msg = "Unable to add host " + record.address + " because it doesn't support hvm";
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, podId, msg, msg);
s_logger.debug(msg);
throw new RuntimeException(msg);
}
}
}
// Build and configure a resource for each pool member not yet in the DB
for (Map.Entry<Host, Host.Record> entry : hosts.entrySet()) {
Host.Record record = entry.getValue();
String hostAddr = record.address;
String prodVersion = CitrixHelper.getProductVersion(record);
String xenVersion = record.softwareVersion.get("xen");
String hostOS = record.softwareVersion.get("product_brand");
if (hostOS == null) {
// newer XS/XCP releases report "platform_name" instead of "product_brand"
hostOS = record.softwareVersion.get("platform_name");
}
String hostOSVer = prodVersion;
String hostKernelVer = record.softwareVersion.get("linux");
if (_resourceMgr.findHostByGuid(record.uuid) != null) {
s_logger.debug("Skipping " + record.address + " because " + record.uuid + " is already in the database.");
continue;
}
CitrixResourceBase resource = createServerResource(dcId, podId, record, latestHotFix);
s_logger.info("Found host " + record.hostname + " ip=" + record.address + " product version=" + prodVersion);
// "details" is persisted host metadata; "params" configures the resource
Map<String, String> details = new HashMap<String, String>();
Map<String, Object> params = new HashMap<String, Object>();
details.put("url", hostAddr);
details.put("username", username);
params.put("username", username);
details.put("password", password);
params.put("password", password);
params.put("zone", Long.toString(dcId));
params.put("guid", record.uuid);
params.put("pod", podId.toString());
params.put("cluster", clusterId.toString());
params.put("pool", poolUuid);
params.put("ipaddress", record.address);
details.put(HostInfo.HOST_OS, hostOS);
details.put(HostInfo.HOST_OS_VERSION, hostOSVer);
details.put(HostInfo.HOST_OS_KERNEL_VERSION, hostKernelVer);
details.put(HostInfo.HYPERVISOR_VERSION, xenVersion);
String privateNetworkLabel = _networkMgr.getDefaultManagementTrafficLabel(dcId, HypervisorType.XenServer);
String storageNetworkLabel = _networkMgr.getDefaultStorageTrafficLabel(dcId, HypervisorType.XenServer);
if (!params.containsKey("private.network.device") && privateNetworkLabel != null) {
params.put("private.network.device", privateNetworkLabel);
details.put("private.network.device", privateNetworkLabel);
}
if (!params.containsKey("storage.network.device1") && storageNetworkLabel != null) {
params.put("storage.network.device1", storageNetworkLabel);
details.put("storage.network.device1", storageNetworkLabel);
}
DataCenterVO zone = _dcDao.findById(dcId);
boolean securityGroupEnabled = zone.isSecurityGroupEnabled();
params.put("securitygroupenabled", Boolean.toString(securityGroupEnabled));
params.put("router.aggregation.command.each.timeout", _configDao.getValue(Config.RouterAggregationCommandEachTimeout.toString()));
params.put("wait", Integer.toString(_wait));
details.put("wait", Integer.toString(_wait));
params.put("migratewait", _configDao.getValue(Config.MigrateWait.toString()));
params.put(Config.XenServerMaxNics.toString().toLowerCase(), _configDao.getValue(Config.XenServerMaxNics.toString()));
params.put(Config.XenServerHeartBeatTimeout.toString().toLowerCase(), _configDao.getValue(Config.XenServerHeartBeatTimeout.toString()));
params.put(Config.XenServerHeartBeatInterval.toString().toLowerCase(), _configDao.getValue(Config.XenServerHeartBeatInterval.toString()));
params.put(Config.InstanceName.toString().toLowerCase(), _instance);
details.put(Config.InstanceName.toString().toLowerCase(), _instance);
try {
resource.configure("XenServer", params);
} catch (ConfigurationException e) {
// A single misconfigured host is skipped, not fatal for the whole pool
_alertMgr.sendAlert(AlertManager.AlertType.ALERT_TYPE_HOST, dcId, podId, "Unable to add " + record.address, "Error is " + e.getMessage());
s_logger.warn("Unable to instantiate " + record.address, e);
continue;
}
resource.start();
resources.put(resource, details);
}
} catch (SessionAuthenticationFailed e) {
throw new DiscoveredWithErrorException("Authentication error");
} catch (XenAPIException e) {
s_logger.warn("XenAPI exception", e);
return null;
} catch (XmlRpcException e) {
s_logger.warn("Xml Rpc Exception", e);
return null;
} catch (UnknownHostException e) {
s_logger.warn("Unable to resolve the host name", e);
return null;
} catch (Exception e) {
s_logger.debug("other exceptions: " + e.toString(), e);
return null;
}
return resources;
}
Aggregations