Search in sources:

Example 1 with OmMultipartUploadCompleteInfo

Use of org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo in project ozone by apache.

From the class RpcClient, method completeMultipartUpload.

/**
 * Completes a multipart upload by sending the accumulated part list to the
 * Ozone Manager for the given key.
 *
 * @param volumeName volume containing the key
 * @param bucketName bucket containing the key
 * @param keyName    key being assembled from the uploaded parts
 * @param uploadID   identifier of the multipart upload session
 * @param partsMap   part number to part name, in the order parts were listed
 * @return the completion info returned by the Ozone Manager
 * @throws IOException if validation fails or the OM call fails
 */
@Override
public OmMultipartUploadCompleteInfo completeMultipartUpload(String volumeName, String bucketName, String keyName, String uploadID, Map<Integer, String> partsMap) throws IOException {
    verifyVolumeName(volumeName);
    verifyBucketName(bucketName);
    HddsClientUtils.checkNotNull(keyName, uploadID);
    // Describe the key and upload session this completion applies to.
    OmKeyArgs args = new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setKeyName(keyName)
        .setMultipartUploadID(uploadID)
        .setAcls(getAclList())
        .build();
    // Hand the collected parts to the OM and return its result directly.
    return ozoneManagerClient.completeMultipartUpload(args, new OmMultipartUploadCompleteList(partsMap));
}
Also used : OmMultipartUploadCompleteInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo) OmMultipartUploadCompleteList(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList) CacheBuilder(com.google.common.cache.CacheBuilder) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs)

Example 2 with OmMultipartUploadCompleteInfo

use of org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo in project ozone by apache.

the class ObjectEndpoint method completeMultipartUpload.

/**
 * Complete a multipart upload.
 *
 * Builds the part-number-to-eTag map from the request body, asks the bucket
 * to complete the upload, and translates OM error codes into the matching
 * S3 error responses.
 *
 * @param bucket bucket name from the request path
 * @param key    object key from the request path
 * @param uploadID multipart upload id from the query string (empty default)
 * @param multipartUploadRequest parsed XML body listing the uploaded parts
 * @return 200 OK with a CompleteMultipartUploadResponse body on success
 * @throws IOException  on unexpected I/O failure talking to OM
 * @throws OS3Exception for S3-visible error conditions (invalid part,
 *         invalid part order, no such upload, entity too small, ...)
 */
@POST
@Produces(MediaType.APPLICATION_XML)
public Response completeMultipartUpload(@PathParam("bucket") String bucket, @PathParam("path") String key, @QueryParam("uploadId") @DefaultValue("") String uploadID, CompleteMultipartUploadRequest multipartUploadRequest) throws IOException, OS3Exception {
    OzoneBucket ozoneBucket = getBucket(bucket);
    // Using LinkedHashMap to preserve ordering of parts list.
    Map<Integer, String> partsMap = new LinkedHashMap<>();
    List<CompleteMultipartUploadRequest.Part> partList = multipartUploadRequest.getPartList();
    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo;
    try {
        for (CompleteMultipartUploadRequest.Part part : partList) {
            partsMap.put(part.getPartNumber(), part.geteTag());
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Parts map {}", partsMap);
        }
        omMultipartUploadCompleteInfo = ozoneBucket.completeMultipartUpload(key, uploadID, partsMap);
        CompleteMultipartUploadResponse completeMultipartUploadResponse = new CompleteMultipartUploadResponse();
        completeMultipartUploadResponse.setBucket(bucket);
        completeMultipartUploadResponse.setKey(key);
        completeMultipartUploadResponse.setETag(omMultipartUploadCompleteInfo.getHash());
        // Location also setting as bucket name.
        completeMultipartUploadResponse.setLocation(bucket);
        return Response.status(Status.OK).entity(completeMultipartUploadResponse).build();
    } catch (OMException ex) {
        // Map OM result codes onto the S3 errors clients expect.
        if (ex.getResult() == ResultCodes.INVALID_PART) {
            throw newError(S3ErrorTable.INVALID_PART, key, ex);
        } else if (ex.getResult() == ResultCodes.INVALID_PART_ORDER) {
            throw newError(S3ErrorTable.INVALID_PART_ORDER, key, ex);
        } else if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
            throw newError(NO_SUCH_UPLOAD, uploadID, ex);
        } else if (ex.getResult() == ResultCodes.ENTITY_TOO_SMALL) {
            throw newError(ENTITY_TOO_SMALL, key, ex);
        } else if (ex.getResult() == ResultCodes.INVALID_REQUEST) {
            OS3Exception os3Exception = newError(INVALID_REQUEST, key, ex);
            os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the CompleteMultipartUpload operation: You must " + "specify at least one part");
            throw os3Exception;
        } else if (ex.getResult() == ResultCodes.NOT_A_FILE) {
            OS3Exception os3Exception = newError(INVALID_REQUEST, key, ex);
            os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the CompleteMultipartUpload operation: " + OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are " + "considered as Unix Paths. A directory already exists with a " + "given KeyName caused failure for MPU");
            throw os3Exception;
        }
        // Fixed doubled comma in the format string ("bucket: {}, , key: {}").
        LOG.error("Error in Complete Multipart Upload Request for bucket: {}, key: {}", bucket, key, ex);
        throw ex;
    }
}
Also used : LinkedHashMap(java.util.LinkedHashMap) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OmMultipartUploadCompleteInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo) OS3Exception(org.apache.hadoop.ozone.s3.exception.OS3Exception) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) POST(javax.ws.rs.POST) Produces(javax.ws.rs.Produces)

Example 3 with OmMultipartUploadCompleteInfo

use of org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo in project ozone by apache.

the class TestOzoneRpcClientAbstract method testCommitPartAfterCompleteUpload.

@Test
public void testCommitPartAfterCompleteUpload() throws Exception {
    // Committing a part (closing its stream) after the upload has been
    // completed must fail with NO_SUCH_MULTIPART_UPLOAD_ERROR.
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String keyName = UUID.randomUUID().toString();
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, RATIS, ONE);
    Assert.assertNotNull(omMultipartInfo.getUploadID());
    String uploadID = omMultipartInfo.getUploadID();
    // upload part 1.
    byte[] data = generateData(5 * 1024 * 1024, (byte) RandomUtils.nextLong());
    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID);
    ozoneOutputStream.write(data, 0, data.length);
    ozoneOutputStream.close();
    OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo();
    Assert.assertNotNull(omMultipartCommitUploadPartInfo);
    // Do not close output stream for part 2.
    ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 2, omMultipartInfo.getUploadID());
    ozoneOutputStream.write(data, 0, data.length);
    Map<Integer, String> partsMap = new LinkedHashMap<>();
    partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName());
    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap);
    // Was previously (and redundantly) re-asserting the part info; the
    // completion result itself is what must be non-null here.
    Assert.assertNotNull(omMultipartUploadCompleteInfo);
    // Read the key back fully; loop because read() may return fewer bytes
    // than requested, and close the stream when done.
    byte[] fileContent = new byte[data.length];
    try (OzoneInputStream inputStream = bucket.readKey(keyName)) {
        int off = 0;
        while (off < fileContent.length) {
            int n = inputStream.read(fileContent, off, fileContent.length - off);
            if (n < 0) {
                break;
            }
            off += n;
        }
        Assert.assertEquals(data.length, off);
    }
    // Only part 1 was included in the completed upload, so the key content
    // must equal part 1's data exactly.
    Assert.assertEquals(new String(data, UTF_8), new String(fileContent, UTF_8));
    try {
        ozoneOutputStream.close();
        fail("testCommitPartAfterCompleteUpload failed");
    } catch (IOException ex) {
        assertTrue(ex instanceof OMException);
        assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ((OMException) ex).getResult());
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) IOException(java.io.IOException) LinkedHashMap(java.util.LinkedHashMap) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) OmMultipartCommitUploadPartInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo) OmMultipartUploadCompleteInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo) OmMultipartInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartInfo) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) Test(org.junit.Test)

Example 4 with OmMultipartUploadCompleteInfo

use of org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo in project ozone by apache.

the class TestOzoneClientMultipartUploadWithFSO method testCommitPartAfterCompleteUpload.

@Test
public void testCommitPartAfterCompleteUpload() throws Exception {
    // FSO-bucket variant: committing a part after the upload has been
    // completed must fail with NO_SUCH_MULTIPART_UPLOAD_ERROR.
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String parentDir = "a/b/c/d/";
    String keyName = parentDir + UUID.randomUUID().toString();
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, RATIS, ONE);
    Assert.assertNotNull(omMultipartInfo.getUploadID());
    String uploadID = omMultipartInfo.getUploadID();
    // upload part 1.
    byte[] data = generateData(5 * 1024 * 1024, (byte) RandomUtils.nextLong());
    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID);
    ozoneOutputStream.write(data, 0, data.length);
    ozoneOutputStream.close();
    OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo();
    Assert.assertNotNull(omMultipartCommitUploadPartInfo);
    // Do not close output stream for part 2.
    ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 2, omMultipartInfo.getUploadID());
    ozoneOutputStream.write(data, 0, data.length);
    Map<Integer, String> partsMap = new LinkedHashMap<>();
    partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName());
    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap);
    Assert.assertNotNull(omMultipartUploadCompleteInfo);
    // Read the key back fully; loop because read() may return fewer bytes
    // than requested, and close the stream when done.
    byte[] fileContent = new byte[data.length];
    try (OzoneInputStream inputStream = bucket.readKey(keyName)) {
        int off = 0;
        while (off < fileContent.length) {
            int n = inputStream.read(fileContent, off, fileContent.length - off);
            if (n < 0) {
                break;
            }
            off += n;
        }
        Assert.assertEquals(data.length, off);
    }
    // Only part 1 was included in the completed upload, so the key content
    // must equal part 1's data exactly.
    Assert.assertEquals(new String(data, UTF_8), new String(fileContent, UTF_8));
    try {
        ozoneOutputStream.close();
        Assert.fail("testCommitPartAfterCompleteUpload failed");
    } catch (IOException ex) {
        Assert.assertTrue(ex instanceof OMException);
        Assert.assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ((OMException) ex).getResult());
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) IOException(java.io.IOException) LinkedHashMap(java.util.LinkedHashMap) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OmMultipartCommitUploadPartInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo) OmMultipartUploadCompleteInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo) OmMultipartInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartInfo) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) Test(org.junit.Test)

Example 5 with OmMultipartUploadCompleteInfo

use of org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo in project ozone by apache.

the class TestOzoneClientMultipartUploadWithFSO method completeMultipartUpload.

/**
 * Completes the given multipart upload and verifies that the returned
 * completion metadata matches the bucket, volume and key it was issued for,
 * and that a content hash was produced.
 */
private void completeMultipartUpload(OzoneBucket bucket, String keyName, String uploadID, Map<Integer, String> partsMap) throws Exception {
    OmMultipartUploadCompleteInfo completeInfo =
        bucket.completeMultipartUpload(keyName, uploadID, partsMap);
    Assert.assertNotNull(completeInfo);
    Assert.assertEquals(completeInfo.getBucket(), bucket.getName());
    Assert.assertEquals(completeInfo.getVolume(), bucket.getVolumeName());
    Assert.assertEquals(completeInfo.getKey(), keyName);
    Assert.assertNotNull(completeInfo.getHash());
}
Also used : OmMultipartUploadCompleteInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo)

Aggregations

OmMultipartUploadCompleteInfo (org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo)10 OMException (org.apache.hadoop.ozone.om.exceptions.OMException)5 IOException (java.io.IOException)3 LinkedHashMap (java.util.LinkedHashMap)3 OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket)3 OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream)3 OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream)3 OmMultipartCommitUploadPartInfo (org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo)3 OmMultipartInfo (org.apache.hadoop.ozone.om.helpers.OmMultipartInfo)3 HashMap (java.util.HashMap)2 Map (java.util.Map)2 OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume)2 OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs)2 OmMultipartUploadCompleteList (org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList)2 Test (org.junit.Test)2 VisibleForTesting (com.google.common.annotations.VisibleForTesting)1 Preconditions (com.google.common.base.Preconditions)1 Strings (com.google.common.base.Strings)1 CacheBuilder (com.google.common.cache.CacheBuilder)1 ByteString (com.google.protobuf.ByteString)1