use of com.emc.storageos.model.TaskResourceRep in project coprhd-controller by CoprHD.
In the class ComputeSystemServiceApiTest, the method testInvalidCredsCreateComputeSystem.
@Test(groups = "runByDefault", timeOut = 100000)
public void testInvalidCredsCreateComputeSystem() throws Exception {
TaskResourceRep taskCreateComputeSystem = createAndDiscoverComputeSystem(
        EnvConfig.get("sanity", "ucsm.invalidhost"),
        EnvConfig.get("sanity", "ucsm.invalidhost.username"),
        EnvConfig.get("sanity", "ucsm.invalidhost.password"),
        "bad-creds-api-test-compute-system", 80);
Assert.assertNotNull(taskCreateComputeSystem, "Compute System Task should not be null");
Assert.assertNotNull(taskCreateComputeSystem.getOpId(), "Compute System Task Id should not be null");
Assert.assertNotNull(taskCreateComputeSystem.getResource(), "Task related resource should not be null");
invalidCredsComputeSystem = rSys.path(COMPUTE_SYSTEM_RESOURCE + "/" + taskCreateComputeSystem.getResource().getId()).get(ComputeSystemRestRep.class);
Assert.assertNotNull(invalidCredsComputeSystem, "Created Compute System should not be null!");
System.out.println("Created Compute System has id: " + invalidCredsComputeSystem.getId());
// Wait long enough for the Compute System to get discovered...
while (invalidCredsComputeSystem.getDiscoveryJobStatus().equals(DiscoveredDataObject.DataCollectionJobStatus.IN_PROGRESS.name())) {
invalidCredsComputeSystem = rSys.path(COMPUTE_SYSTEM_RESOURCE + "/" + taskCreateComputeSystem.getResource().getId()).get(ComputeSystemRestRep.class);
Thread.sleep(1000);
}
// Refresh the compute system!
invalidCredsComputeSystem = rSys.path(COMPUTE_SYSTEM_RESOURCE + "/" + taskCreateComputeSystem.getResource().getId()).get(ComputeSystemRestRep.class);
Assert.assertEquals(invalidCredsComputeSystem.getDiscoveryJobStatus(), DiscoveredDataObject.DataCollectionJobStatus.ERROR.name());
}
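The polling loop above has no bound of its own and relies on the TestNG timeOut to stop a discovery that never leaves IN_PROGRESS. A bounded helper along the following lines could make the wait explicit; this is only a sketch that reuses the rSys client and COMPUTE_SYSTEM_RESOURCE constant from the test, and the helper name and maxWaitMs parameter are assumptions, not part of the original class.

private ComputeSystemRestRep waitForDiscovery(URI systemId, long maxWaitMs) throws InterruptedException {
    // Re-fetch the compute system until discovery leaves IN_PROGRESS or the deadline passes.
    long deadline = System.currentTimeMillis() + maxWaitMs;
    ComputeSystemRestRep system;
    do {
        system = rSys.path(COMPUTE_SYSTEM_RESOURCE + "/" + systemId).get(ComputeSystemRestRep.class);
        if (!DiscoveredDataObject.DataCollectionJobStatus.IN_PROGRESS.name().equals(system.getDiscoveryJobStatus())) {
            return system;
        }
        Thread.sleep(1000);
    } while (System.currentTimeMillis() < deadline);
    return system; // still IN_PROGRESS; the caller decides whether that counts as a failure
}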
use of com.emc.storageos.model.TaskResourceRep in project coprhd-controller by CoprHD.
In the class InternalApiTest, the method testInternalFileService.
/**
* This test exercises only the server-side functionality, not the internal client.
*
* @throws Exception
*/
@Test
public void testInternalFileService() throws Exception {
// create fs
FileSystemParam fsparam = new FileSystemParam();
fsparam.setVpool(_cosId);
fsparam.setLabel("test-internalapi-" + System.currentTimeMillis());
fsparam.setVarray(_nhId);
fsparam.setSize("20971520");
URI path = URI.create(_apiServer).resolve("/internal/file/filesystems");
WebResource rRoot = _client.resource(path);
WebResource.Builder rBuilder = _requestHelper.addSignature(rRoot);
TaskResourceRep resp = _requestHelper.addToken(rBuilder, _rootToken).post(TaskResourceRep.class, fsparam);
Assert.assertTrue(resp != null);
Assert.assertNotNull(resp.getOpId());
Assert.assertNotNull(resp.getResource());
String fsId = resp.getResource().getId().toString();
String opId = resp.getOpId();
// GET filesystem
path = URI.create(_apiServer).resolve("/internal/file/filesystems/" + fsId);
rRoot = _client.resource(path);
rBuilder = _requestHelper.addSignature(rRoot);
ClientResponse response = _requestHelper.addToken(rBuilder, _rootToken).get(ClientResponse.class);
Assert.assertTrue(response != null);
Assert.assertEquals(200, response.getStatus());
// wait for the create to finish
path = URI.create(_apiServer).resolve("/internal/file/filesystems/" + fsId + "/tasks/" + opId);
int checkCount = 1200;
String status;
do {
// wait up to ~2 minutes for the fs create task to complete
Thread.sleep(100);
rRoot = _client.resource(path);
TaskResourceRep fsResp = _requestHelper.addSignature(rRoot).get(TaskResourceRep.class);
status = fsResp.getState();
} while (status.equals("pending") && checkCount-- > 0);
if (!status.equals("ready")) {
Assert.assertTrue("Fileshare create timed out", false);
}
// export
path = URI.create(_apiServer).resolve("/internal/file/filesystems/" + fsId + "/exports");
FileSystemExportParam export = new FileSystemExportParam();
export.setPermissions("root");
export.setRootUserMapping("root");
export.setProtocol("NFS");
export.setEndpoints(new ArrayList<String>());
export.getEndpoints().add("www.ford.com");
rRoot = _client.resource(path);
rBuilder = _requestHelper.addSignature(rRoot);
resp = _requestHelper.addToken(rBuilder, _rootToken).post(TaskResourceRep.class, export);
opId = resp.getOpId();
// wait for the export to finish
path = URI.create(_apiServer).resolve("/internal/file/filesystems/" + fsId + "/tasks/" + opId);
do {
// wait up to ~2 minutes for the fs export task to complete
Thread.sleep(100);
rRoot = _client.resource(path);
TaskResourceRep fsResp = _requestHelper.addSignature(rRoot).get(TaskResourceRep.class);
status = fsResp.getState();
} while (status.equals("pending") && checkCount-- > 0);
if (!status.equals("ready")) {
Assert.assertTrue("Fileshare export timed out", false);
}
// unexport
String unexportPath = String.format("/internal/file/filesystems/%s/exports/%s,%s,%s,%s", fsId, export.getProtocol(), export.getSecurityType(), export.getPermissions(), export.getRootUserMapping());
path = URI.create(_apiServer).resolve(unexportPath);
rRoot = _client.resource(path);
rBuilder = _requestHelper.addSignature(rRoot);
resp = _requestHelper.addToken(rBuilder, _rootToken).delete(TaskResourceRep.class, export);
opId = resp.getOpId();
// wait for the unexport to finish
path = URI.create(_apiServer).resolve("/internal/file/filesystems/" + fsId + "/tasks/" + opId);
do {
// wait up to ~2 minutes for the fs unexport task to complete
Thread.sleep(100);
rRoot = _client.resource(path);
TaskResourceRep fsResp = _requestHelper.addSignature(rRoot).get(TaskResourceRep.class);
status = fsResp.getState();
} while (status.equals("pending") && checkCount-- > 0);
if (!status.equals("ready")) {
Assert.assertTrue("Fileshare unexport timed out", false);
}
// delete
path = URI.create(_apiServer).resolve("/internal/file/filesystems/" + fsId + "/deactivate");
FileSystemDeleteParam deleteParam = new FileSystemDeleteParam();
deleteParam.setForceDelete(false);
rRoot = _client.resource(path);
rBuilder = _requestHelper.addSignature(rRoot);
resp = _requestHelper.addToken(rBuilder, _rootToken).post(TaskResourceRep.class, deleteParam);
Assert.assertTrue(resp != null);
}
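The create, export, and unexport steps above repeat the same task-polling loop. A shared helper in the spirit of the following sketch could replace the three loops; it assumes the same _client, _requestHelper, and _apiServer fields used by the test, and the helper name and maxChecks handling are illustrative rather than taken from InternalApiTest.

private String waitForTask(String fsId, String opId, int maxChecks) throws Exception {
    URI taskPath = URI.create(_apiServer).resolve("/internal/file/filesystems/" + fsId + "/tasks/" + opId);
    String state;
    do {
        Thread.sleep(100); // poll every 100 ms, as the test does
        WebResource taskResource = _client.resource(taskPath);
        TaskResourceRep task = _requestHelper.addSignature(taskResource).get(TaskResourceRep.class);
        state = task.getState();
    } while (state.equals("pending") && maxChecks-- > 0);
    return state; // "ready" on success; "pending" here means the wait timed out
}

Each wait block above would then reduce to a single check such as Assert.assertEquals("ready", waitForTask(fsId, opId, 1200)).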
use of com.emc.storageos.model.TaskResourceRep in project coprhd-controller by CoprHD.
In the class InternalApiTest, the method testFileServiceUsingInternalClient.
/**
* This test exercises both the server side and the internal client.
*
* @throws Exception
*/
@Test
public void testFileServiceUsingInternalClient() throws Exception {
// create fs
FileSystemParam fsparam = new FileSystemParam();
fsparam.setVpool(_cosId);
fsparam.setLabel("test-internalapi-" + System.currentTimeMillis());
fsparam.setVarray(_nhId);
fsparam.setSize("20971520");
TaskResourceRep resp = _internalFileClient.createFileSystem(fsparam, _rootToken);
Assert.assertTrue(resp != null);
Assert.assertNotNull(resp.getOpId());
Assert.assertNotNull(resp.getResource());
URI fsId = resp.getResource().getId();
String opId = resp.getOpId();
// GET filesystem - no method on the client for this?
WebResource rRoot = _requestHelper.createRequest(_client, _apiServer, "/internal/file/filesystems/" + fsId);
WebResource.Builder rBuilder = _requestHelper.addSignature(rRoot);
ClientResponse response = _requestHelper.addToken(rBuilder, _rootToken).get(ClientResponse.class);
Assert.assertTrue(response != null);
Assert.assertEquals(200, response.getStatus());
// wait for the create to finish
int checkCount = 1200;
String status;
do {
// wait up to ~2 minutes for the fs create task to complete
Thread.sleep(100);
TaskResourceRep fsResp = _internalFileClient.getTaskStatus(fsId, opId);
status = fsResp.getState();
} while (status.equals("pending") && checkCount-- > 0);
if (!status.equals("ready")) {
Assert.assertTrue("Fileshare create timed out", false);
}
// export
FileSystemExportParam export = new FileSystemExportParam();
export.setPermissions("root");
export.setRootUserMapping("root");
export.setProtocol("NFS");
export.setEndpoints(new ArrayList<String>());
export.getEndpoints().add("www.ford.com");
resp = _internalFileClient.exportFileSystem(fsId, export);
opId = resp.getOpId();
// wait for the export to finish
do {
// wait up to ~2 minutes for the fs export task to complete
Thread.sleep(100);
TaskResourceRep fsResp = _internalFileClient.getTaskStatus(fsId, opId);
status = fsResp.getState();
} while (status.equals("pending") && checkCount-- > 0);
if (!status.equals("ready")) {
Assert.assertTrue("Fileshare export timed out", false);
}
// unexport
resp = _internalFileClient.unexportFileSystem(fsId, export.getProtocol(), export.getSecurityType(), export.getPermissions(), export.getRootUserMapping(), null);
opId = resp.getOpId();
// wait for the unexport to finish
do {
// wait up to ~2 minutes for the fs unexport task to complete
Thread.sleep(100);
TaskResourceRep fsResp = _internalFileClient.getTaskStatus(fsId, opId);
status = fsResp.getState();
} while (status.equals("pending") && checkCount-- > 0);
if (!status.equals("ready")) {
Assert.assertTrue("Fileshare unexport timed out", false);
}
// delete
FileSystemDeleteParam deleteParam = new FileSystemDeleteParam();
deleteParam.setForceDelete(false);
resp = _internalFileClient.deactivateFileSystem(fsId, _rootToken, deleteParam);
Assert.assertTrue(resp != null);
}
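With the internal client the same consolidation is even shorter, because getTaskStatus already wraps the signed GET. This is only a sketch; the helper itself is hypothetical and simply reuses the _internalFileClient field from the test.

private String waitForTask(URI fsId, String opId, int maxChecks) throws Exception {
    String state;
    do {
        Thread.sleep(100);
        TaskResourceRep task = _internalFileClient.getTaskStatus(fsId, opId);
        state = task.getState();
    } while (state.equals("pending") && maxChecks-- > 0);
    return state;
}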
use of com.emc.storageos.model.TaskResourceRep in project coprhd-controller by CoprHD.
In the class UnManagedVolumeService, the method ingestExportedVolumes.
/**
* Ingest Exported Volumes
*
* For each UnManaged Volume, find the list of masking views this volume
* is exposed to.
*
* If there is only one masking view, verify that all the initiators are
* available on the existing MV and that the storage ports are available
* in the given VArray. Check whether this export mask already exists in
* ViPR; if not, create a new Export Mask with the storage ports and
* initiators from ViPR, otherwise add the volume to the existing export
* mask.
*
* If there is more than one masking view, verify that all the initiators
* are available on all existing MVs and that the storage ports within
* each masking view are available in the given VArray. Check whether each
* export mask already exists in ViPR; if not, create a new Export Mask
* with the storage ports and initiators from ViPR, otherwise add the
* volume to the corresponding export mask.
*
* @param exportIngestParam
* @brief Add volumes to new or existing export masks; create masks when needed
* @return TaskList
* @throws InternalException
*/
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/ingest-exported")
@CheckPermission(roles = { Role.SYSTEM_ADMIN, Role.RESTRICTED_SYSTEM_ADMIN })
public TaskList ingestExportedVolumes(VolumeExportIngestParam exportIngestParam) throws InternalException {
TaskList taskList = new TaskList();
Map<String, TaskResourceRep> taskMap = new HashMap<String, TaskResourceRep>();
BaseIngestionRequestContext requestContext = null;
try {
if (exportIngestParam.getUnManagedVolumes().size() > getMaxBulkSize()) {
throw APIException.badRequests.exceedingLimit("unmanaged volumes", getMaxBulkSize());
}
Project project = _permissionsHelper.getObjectById(exportIngestParam.getProject(), Project.class);
ArgValidator.checkEntity(project, exportIngestParam.getProject(), false);
VirtualArray varray = VolumeIngestionUtil.getVirtualArrayForVolumeCreateRequest(project, exportIngestParam.getVarray(), _permissionsHelper, _dbClient);
VirtualPool vpool = VolumeIngestionUtil.getVirtualPoolForVolumeCreateRequest(project, exportIngestParam.getVpool(), _permissionsHelper, _dbClient);
// allow ingestion for VPool without Virtual Arrays
if (null != vpool.getVirtualArrays() && !vpool.getVirtualArrays().isEmpty() && !vpool.getVirtualArrays().contains(exportIngestParam.getVarray().toString())) {
throw APIException.internalServerErrors.virtualPoolNotMatchingVArray(exportIngestParam.getVarray());
}
// check for Quotas
long unManagedVolumesCapacity = VolumeIngestionUtil.getTotalUnManagedVolumeCapacity(_dbClient, exportIngestParam.getUnManagedVolumes());
_logger.info("UnManagedVolume provisioning quota validation successful");
TenantOrg tenant = _dbClient.queryObject(TenantOrg.class, project.getTenantOrg().getURI());
CapacityUtils.validateQuotasForProvisioning(_dbClient, vpool, project, tenant, unManagedVolumesCapacity, "volume");
VolumeIngestionUtil.checkIngestionRequestValidForUnManagedVolumes(exportIngestParam.getUnManagedVolumes(), vpool, _dbClient);
requestContext = new BaseIngestionRequestContext(_dbClient, exportIngestParam.getUnManagedVolumes(), vpool, varray, project, tenant, exportIngestParam.getVplexIngestionMethod());
while (requestContext.hasNext()) {
UnManagedVolume unManagedVolume = requestContext.next();
if (null == unManagedVolume) {
_logger.warn("No Unmanaged Volume with URI {} found in database. Continuing...", requestContext.getCurrentUnManagedVolumeUri());
continue;
}
String taskId = UUID.randomUUID().toString();
Operation operation = _dbClient.createTaskOpStatus(UnManagedVolume.class, requestContext.getCurrentUnManagedVolumeUri(), taskId, ResourceOperationTypeEnum.INGEST_EXPORTED_BLOCK_OBJECTS);
TaskResourceRep task = toTask(unManagedVolume, taskId, operation);
taskMap.put(unManagedVolume.getId().toString(), task);
}
taskList.getTaskList().addAll(taskMap.values());
// find or create ExportGroup for this set of volumes being ingested
URI exportGroupResourceUri = null;
String resourceType = ExportGroupType.Host.name();
String computeResourcelabel = null;
if (null != exportIngestParam.getCluster()) {
resourceType = ExportGroupType.Cluster.name();
Cluster cluster = _dbClient.queryObject(Cluster.class, exportIngestParam.getCluster());
exportGroupResourceUri = cluster.getId();
computeResourcelabel = cluster.getLabel();
requestContext.setCluster(exportIngestParam.getCluster());
} else {
Host host = _dbClient.queryObject(Host.class, exportIngestParam.getHost());
exportGroupResourceUri = host.getId();
computeResourcelabel = host.getHostName();
requestContext.setHost(exportIngestParam.getHost());
}
ExportGroup exportGroup = VolumeIngestionUtil.verifyExportGroupExists(requestContext, requestContext.getProject().getId(), exportGroupResourceUri, exportIngestParam.getVarray(), resourceType, _dbClient);
if (null == exportGroup) {
_logger.info("Creating Export Group with label {}", computeResourcelabel);
ResourceAndUUIDNameGenerator nameGenerator = new ResourceAndUUIDNameGenerator();
exportGroup = VolumeIngestionUtil.initializeExportGroup(requestContext.getProject(), resourceType, exportIngestParam.getVarray(), computeResourcelabel, _dbClient, nameGenerator, requestContext.getTenant());
requestContext.setExportGroupCreated(true);
}
requestContext.setExportGroup(exportGroup);
_logger.info("ExportGroup {} created ", exportGroup.forDisplay());
IngestVolumesExportedSchedulingThread.executeApiTask(_asyncTaskService.getExecutorService(), requestContext, ingestStrategyFactory, this, _dbClient, taskMap, taskList);
} catch (InternalException e) {
_logger.error("InternalException occurred due to: {}", e);
throw e;
} catch (Exception e) {
_logger.error("Unexpected exception occurred due to: {}", e);
throw APIException.internalServerErrors.genericApisvcError(ExceptionUtils.getExceptionMessage(e), e);
}
return taskList;
}
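The method reads the project, varray, vpool, host or cluster, and unmanaged volume URIs from the VolumeExportIngestParam payload. A caller-side sketch of assembling that payload is shown below; the URN values are placeholders, and the setters are assumed to mirror the getters used above rather than verified against the model class.

VolumeExportIngestParam ingestParam = new VolumeExportIngestParam();
ingestParam.setProject(URI.create("urn:storageos:Project:placeholder:"));
ingestParam.setVarray(URI.create("urn:storageos:VirtualArray:placeholder:"));
ingestParam.setVpool(URI.create("urn:storageos:VirtualPool:placeholder:"));
ingestParam.setHost(URI.create("urn:storageos:Host:placeholder:")); // or setCluster(...) to build a cluster export group
// URIs of the UnManagedVolume objects to ingest; the list size is capped by getMaxBulkSize()
List<URI> volumes = new ArrayList<URI>();
volumes.add(URI.create("urn:storageos:UnManagedVolume:placeholder:"));
ingestParam.setUnManagedVolumes(volumes);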
use of com.emc.storageos.model.TaskResourceRep in project coprhd-controller by CoprHD.
In the class UnManagedVolumeService, the method ingestVolumes.
/**
* UnManaged volumes are volumes that are present within ViPR
* storage systems but have not yet been ingested by ViPR. Volume ingest is the process of
* moving unmanaged volumes under ViPR management, and it provides the flexibility of
* determining which volumes are ingested by ViPR. A virtual pool, project, and virtual
* array must be associated with an unmanaged volume before it can be ingested by ViPR.
* The list of supported virtual pools for each unmanaged volume is exposed via
* /vdc/unmanaged/volumes/bulk. Using an unsupported virtual pool results in an error.
* The number of unmanaged volumes that can be ingested in a single API call is limited to
* 4000.
*
* @param param
* parameters required for unmanaged volume ingestion
* @prereq none
* @brief Ingest unmanaged volumes
* @throws InternalException
*/
@POST
@Consumes({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Produces({ MediaType.APPLICATION_XML, MediaType.APPLICATION_JSON })
@Path("/ingest")
@CheckPermission(roles = { Role.SYSTEM_ADMIN, Role.RESTRICTED_SYSTEM_ADMIN })
public TaskList ingestVolumes(VolumeIngest param) throws InternalException {
if (param.getUnManagedVolumes().size() > getMaxBulkSize()) {
throw APIException.badRequests.exceedingLimit("unmanaged volumes", getMaxBulkSize());
}
TaskList taskList = new TaskList();
Map<String, String> taskMap = new HashMap<String, String>();
BaseIngestionRequestContext requestContext = null;
try {
// Get and validate the project.
Project project = _permissionsHelper.getObjectById(param.getProject(), Project.class);
ArgValidator.checkEntity(project, param.getProject(), false);
// Get and validate the varray
VirtualArray varray = VolumeIngestionUtil.getVirtualArrayForVolumeCreateRequest(project, param.getVarray(), _permissionsHelper, _dbClient);
// Get and validate the vpool.
VirtualPool vpool = VolumeIngestionUtil.getVirtualPoolForVolumeCreateRequest(project, param.getVpool(), _permissionsHelper, _dbClient);
// allow ingestion for VPool without Virtual Arrays
if (null != vpool.getVirtualArrays() && !vpool.getVirtualArrays().isEmpty() && !vpool.getVirtualArrays().contains(param.getVarray().toString())) {
throw APIException.internalServerErrors.virtualPoolNotMatchingVArray(param.getVarray());
}
// check for Quotas
long unManagedVolumesCapacity = VolumeIngestionUtil.getTotalUnManagedVolumeCapacity(_dbClient, param.getUnManagedVolumes());
TenantOrg tenant = _dbClient.queryObject(TenantOrg.class, project.getTenantOrg().getURI());
CapacityUtils.validateQuotasForProvisioning(_dbClient, vpool, project, tenant, unManagedVolumesCapacity, "volume");
_logger.info("UnManagedVolume provisioning quota validation successful for {}", unManagedVolumesCapacity);
requestContext = new BaseIngestionRequestContext(_dbClient, param.getUnManagedVolumes(), vpool, varray, project, tenant, param.getVplexIngestionMethod());
while (requestContext.hasNext()) {
UnManagedVolume unManagedVolume = requestContext.next();
if (null == unManagedVolume) {
_logger.info("No Unmanaged Volume with URI {} found in database. Continuing...", requestContext.getCurrentUnManagedVolumeUri());
continue;
}
String taskId = UUID.randomUUID().toString();
Operation operation = _dbClient.createTaskOpStatus(UnManagedVolume.class, unManagedVolume.getId(), taskId, ResourceOperationTypeEnum.INGEST_VOLUMES);
TaskResourceRep task = toTask(unManagedVolume, taskId, operation);
taskList.getTaskList().add(task);
taskMap.put(unManagedVolume.getId().toString(), taskId);
}
IngestVolumesUnexportedSchedulingThread.executeApiTask(_asyncTaskService.getExecutorService(), requestContext, ingestStrategyFactory, this, _dbClient, taskMap, taskList);
} catch (InternalException e) {
throw e;
} catch (Exception e) {
_logger.debug("Unexpected ingestion exception:", e);
throw APIException.internalServerErrors.genericApisvcError(ExceptionUtils.getExceptionMessage(e), e);
}
return taskList;
}
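Because the endpoint rejects requests above getMaxBulkSize() (4000 unmanaged volumes, as described in the Javadoc), a caller with a larger set has to submit batches. The sketch below illustrates that; ingest(...) stands in for however the client actually posts the VolumeIngest payload to this service's /ingest path, and the setUnManagedVolumes setter is assumed to mirror the getter used above.

private static final int MAX_BULK_SIZE = 4000; // mirrors the server-side limit described above

void ingestInBatches(List<URI> unManagedVolumes, VolumeIngest template) {
    for (int i = 0; i < unManagedVolumes.size(); i += MAX_BULK_SIZE) {
        List<URI> batch = unManagedVolumes.subList(i, Math.min(i + MAX_BULK_SIZE, unManagedVolumes.size()));
        template.setUnManagedVolumes(batch); // project, varray, and vpool stay the same across batches
        TaskList tasks = ingest(template); // placeholder for the actual REST call to this endpoint
        // each TaskResourceRep in tasks.getTaskList() can then be polled until it leaves the pending state
    }
}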