
Example 6 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in the apache/ozone project.

Class TestObjectStoreWithFSO, method createKey:

private void createKey(OzoneBucket ozoneBucket, String key, int length, byte[] input) throws Exception {
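    // The length passed to createKey is a size hint used for block allocation;
    // the stream still accepts the extra 10 bytes written below.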
    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key, length);
    ozoneOutputStream.write(input);
    ozoneOutputStream.write(input, 0, 10);
    ozoneOutputStream.close();
    // Read the key with given key name.
    OzoneInputStream ozoneInputStream = ozoneBucket.readKey(key);
    byte[] read = new byte[length];
    ozoneInputStream.read(read, 0, length);
    ozoneInputStream.close();
    String inputString = new String(input, StandardCharsets.UTF_8);
    Assert.assertEquals(inputString, new String(read, StandardCharsets.UTF_8));
    // Read using filesystem.
    String rootPath = String.format("%s://%s.%s/", OZONE_URI_SCHEME, bucketName, volumeName);
    OzoneFileSystem o3fs = (OzoneFileSystem) FileSystem.get(new URI(rootPath), conf);
    FSDataInputStream fsDataInputStream = o3fs.open(new Path(key));
    read = new byte[length];
    fsDataInputStream.read(read, 0, length);
    fsDataInputStream.close();
    Assert.assertEquals(inputString, new String(read, StandardCharsets.UTF_8));
}
Also used: Path(org.apache.hadoop.fs.Path), OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream), OzoneFileSystem(org.apache.hadoop.fs.ozone.OzoneFileSystem), FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream), OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream), URI(java.net.URI)
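
A note that applies to this and the later examples: they ignore the return value of read(byte[], int, int), which is tolerable for small test payloads, but a single read call is not guaranteed to fill the buffer. A minimal helper that loops until the buffer is full could look like the sketch below (readFully is a hypothetical name, not part of the Ozone client API; it needs java.io.InputStream, java.io.IOException, and java.io.EOFException):

private static void readFully(InputStream in, byte[] buffer) throws IOException {
    int offset = 0;
    while (offset < buffer.length) {
        // read may deliver fewer bytes than requested in a single call
        int n = in.read(buffer, offset, buffer.length - offset);
        if (n < 0) {
            throw new EOFException("Stream ended after " + offset + " of " + buffer.length + " bytes");
        }
        offset += n;
    }
}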

Example 7 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in the apache/ozone project.

Class ObjectEndpoint, method createMultipartKey:

private Response createMultipartKey(String bucket, String key, long length, int partNumber, String uploadID, InputStream body) throws IOException, OS3Exception {
    try {
        OzoneBucket ozoneBucket = getBucket(bucket);
        String copyHeader;
        OzoneOutputStream ozoneOutputStream = null;
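        // AWS SigV4 "streaming" uploads frame each chunk with its own signature;
        // unwrap that framing before reading the payload.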
        if ("STREAMING-AWS4-HMAC-SHA256-PAYLOAD".equals(headers.getHeaderString("x-amz-content-sha256"))) {
            body = new SignedChunksInputStream(body);
        }
        try {
            ozoneOutputStream = ozoneBucket.createMultipartKey(key, length, partNumber, uploadID);
            copyHeader = headers.getHeaderString(COPY_SOURCE_HEADER);
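            // An x-amz-copy-source header means this part's bytes come from an
            // existing object rather than the request body.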
            if (copyHeader != null) {
                Pair<String, String> result = parseSourceHeader(copyHeader);
                String sourceBucket = result.getLeft();
                String sourceKey = result.getRight();
                Long sourceKeyModificationTime = getBucket(sourceBucket).getKey(sourceKey).getModificationTime().toEpochMilli();
                String copySourceIfModifiedSince = headers.getHeaderString(COPY_SOURCE_IF_MODIFIED_SINCE);
                String copySourceIfUnmodifiedSince = headers.getHeaderString(COPY_SOURCE_IF_UNMODIFIED_SINCE);
                if (!checkCopySourceModificationTime(sourceKeyModificationTime, copySourceIfModifiedSince, copySourceIfUnmodifiedSince)) {
                    throw newError(PRECOND_FAILED, sourceBucket + "/" + sourceKey);
                }
                try (OzoneInputStream sourceObject = getBucket(sourceBucket).readKey(sourceKey)) {
                    String range = headers.getHeaderString(COPY_SOURCE_HEADER_RANGE);
                    if (range != null) {
                        RangeHeader rangeHeader = RangeHeaderParserUtil.parseRangeHeader(range, 0);
                        final long skipped = sourceObject.skip(rangeHeader.getStartOffset());
                        if (skipped != rangeHeader.getStartOffset()) {
                            throw new EOFException("Bytes to skip: " + rangeHeader.getStartOffset() + " actual: " + skipped);
                        }
                        IOUtils.copyLarge(sourceObject, ozoneOutputStream, 0, rangeHeader.getEndOffset() - rangeHeader.getStartOffset() + 1);
                    } else {
                        IOUtils.copy(sourceObject, ozoneOutputStream);
                    }
                }
            } else {
                IOUtils.copy(body, ozoneOutputStream);
            }
        } finally {
            if (ozoneOutputStream != null) {
                ozoneOutputStream.close();
            }
        }
        assert ozoneOutputStream != null;
        OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo();
        String eTag = omMultipartCommitUploadPartInfo.getPartName();
        if (copyHeader != null) {
            return Response.ok(new CopyPartResult(eTag)).build();
        } else {
            return Response.ok().header("ETag", eTag).build();
        }
    } catch (OMException ex) {
        if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) {
            throw newError(NO_SUCH_UPLOAD, uploadID, ex);
        } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, bucket + "/" + key, ex);
        }
        throw ex;
    }
}
Also used: OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream), SignedChunksInputStream(org.apache.hadoop.ozone.s3.SignedChunksInputStream), OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream), RangeHeader(org.apache.hadoop.ozone.s3.util.RangeHeader), OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket), OmMultipartCommitUploadPartInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo), OptionalLong(java.util.OptionalLong), EOFException(java.io.EOFException), OMException(org.apache.hadoop.ozone.om.exceptions.OMException)
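
For context, the S3 handler above drives the same multipart calls a client can issue directly against OzoneBucket. Below is a minimal client-side sketch of a single-part upload, assuming bucket already exists and data holds the part payload; the signatures follow the OzoneBucket API used in the Ozone codebase (plus OmMultipartInfo from org.apache.hadoop.ozone.om.helpers, java.util.Map/TreeMap, and the org.apache.hadoop.hdds.client replication enums), but treat it as an illustration rather than canonical usage:

// Start the upload and remember its ID.
OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
        ReplicationType.RATIS, ReplicationFactor.THREE);
String uploadID = multipartInfo.getUploadID();
// Upload part 1; the part name recorded on commit serves as the S3 ETag.
OzoneOutputStream part = bucket.createMultipartKey(keyName, data.length, 1, uploadID);
part.write(data);
part.close();
// Complete the upload with the map of part number to part name.
Map<Integer, String> parts = new TreeMap<>();
parts.put(1, part.getCommitUploadPartInfo().getPartName());
bucket.completeMultipartUpload(keyName, uploadID, parts);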

Example 8 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in the apache/ozone project.

Class TestOzoneRpcClientAbstract, method readCorruptedKey:

private void readCorruptedKey(String volumeName, String bucketName, String keyName, boolean verifyChecksum) {
    try {
        OzoneConfiguration configuration = cluster.getConf();
        final OzoneClientConfig clientConfig = configuration.getObject(OzoneClientConfig.class);
        clientConfig.setChecksumVerify(verifyChecksum);
        configuration.setFromObject(clientConfig);
        RpcClient client = new RpcClient(configuration, null);
        OzoneInputStream is = client.getKey(volumeName, bucketName, keyName);
        is.read(new byte[100]);
        is.close();
        if (verifyChecksum) {
            fail("Reading corrupted data should fail, as verify checksum is " + "enabled");
        }
    } catch (IOException e) {
        if (!verifyChecksum) {
            fail("Reading corrupted data should not fail, as verify checksum is " + "disabled");
        }
    }
}
Also used: OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream), OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration), OzoneClientConfig(org.apache.hadoop.hdds.scm.OzoneClientConfig), IOException(java.io.IOException)
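
The pattern worth noting here is how typed client settings are applied: read the config object, mutate it, and write it back with setFromObject before building the client. In isolation, using only the calls that appear above:

OzoneConfiguration conf = new OzoneConfiguration();
OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
// Disable client-side checksum validation; corrupted data is then returned as-is.
clientConfig.setChecksumVerify(false);
conf.setFromObject(clientConfig);
// Any client constructed from 'conf' now reads without verifying checksums.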

Example 9 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in the apache/ozone project.

Class TestOzoneRpcClientAbstract, method testPutKeyRatisThreeNodes:

@Test
public void testPutKeyRatisThreeNodes() throws IOException {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    Instant testStartTime = Instant.now();
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    for (int i = 0; i < 10; i++) {
        String keyName = UUID.randomUUID().toString();
        OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE, new HashMap<>());
        out.write(value.getBytes(UTF_8));
        out.close();
        OzoneKey key = bucket.getKey(keyName);
        Assert.assertEquals(keyName, key.getName());
        OzoneInputStream is = bucket.readKey(keyName);
        byte[] fileContent = new byte[value.getBytes(UTF_8).length];
        is.read(fileContent);
        is.close();
        Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, keyName, ReplicationType.RATIS, THREE));
        Assert.assertEquals(value, new String(fileContent, UTF_8));
        Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
        Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
    }
}
Also used: OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume), OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket), OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream), Instant(java.time.Instant), OzoneKey(org.apache.hadoop.ozone.client.OzoneKey), OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream), Test(org.junit.Test)
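
The createKey overload above, taking ReplicationType and ReplicationFactor separately, is the older form; newer Ozone releases also accept a ReplicationConfig. A sketch of the equivalent call, assuming a client version that provides org.apache.hadoop.hdds.client.ReplicationConfig:

ReplicationConfig replication = ReplicationConfig.fromTypeAndFactor(
        ReplicationType.RATIS, ReplicationFactor.THREE);
OzoneOutputStream out = bucket.createKey(keyName,
        value.getBytes(UTF_8).length, replication, new HashMap<>());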

Example 10 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in the apache/ozone project.

Class TestOzoneRpcClientAbstract, method testDeletedKeyForGDPR:

/**
 * Tests deletedKey for GDPR.
 * 1. Create GDPR Enabled bucket.
 * 2. Create a Key in this bucket so it gets encrypted via GDPRSymmetricKey.
 * 3. Read the key and validate that its content/metadata are as expected;
 * readKey decrypts using the GDPR symmetric key whose details are stored in
 * the KeyInfo metadata.
 * 4. Delete this key in GDPR enabled bucket
 * 5. Confirm the deleted key metadata in deletedTable does not contain the
 * GDPR encryption details (flag, secret, algorithm).
 * @throws Exception
 */
@Test
public void testDeletedKeyForGDPR() throws Exception {
    // Step 1
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String keyName = UUID.randomUUID().toString();
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    BucketArgs args = BucketArgs.newBuilder().addMetadata(OzoneConsts.GDPR_FLAG, "true").build();
    volume.createBucket(bucketName, args);
    OzoneBucket bucket = volume.getBucket(bucketName);
    Assert.assertEquals(bucketName, bucket.getName());
    Assert.assertNotNull(bucket.getMetadata());
    Assert.assertEquals("true", bucket.getMetadata().get(OzoneConsts.GDPR_FLAG));
    // Step 2
    String text = "hello world";
    Map<String, String> keyMetadata = new HashMap<>();
    keyMetadata.put(OzoneConsts.GDPR_FLAG, "true");
    OzoneOutputStream out = bucket.createKey(keyName, text.getBytes(UTF_8).length, RATIS, ONE, keyMetadata);
    out.write(text.getBytes(UTF_8));
    out.close();
    // Step 3
    OzoneKeyDetails key = bucket.getKey(keyName);
    Assert.assertEquals(keyName, key.getName());
    Assert.assertEquals("true", key.getMetadata().get(OzoneConsts.GDPR_FLAG));
    Assert.assertEquals("AES", key.getMetadata().get(OzoneConsts.GDPR_ALGORITHM));
    Assert.assertNotNull(key.getMetadata().get(OzoneConsts.GDPR_SECRET));
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] fileContent = new byte[text.getBytes(UTF_8).length];
    is.read(fileContent);
    is.close();
    Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, keyName, RATIS, ONE));
    Assert.assertEquals(text, new String(fileContent, UTF_8));
    // Step 4
    bucket.deleteKey(keyName);
    // Step 5
    OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
    String objectKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
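    // Deleted keys are parked in the OM deletedTable until background cleanup removes them.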
    RepeatedOmKeyInfo deletedKeys = omMetadataManager.getDeletedTable().get(objectKey);
    if (deletedKeys != null) {
        Map<String, String> deletedKeyMetadata = deletedKeys.getOmKeyInfoList().get(0).getMetadata();
        Assert.assertFalse(deletedKeyMetadata.containsKey(OzoneConsts.GDPR_FLAG));
        Assert.assertFalse(deletedKeyMetadata.containsKey(OzoneConsts.GDPR_SECRET));
        Assert.assertFalse(deletedKeyMetadata.containsKey(OzoneConsts.GDPR_ALGORITHM));
    }
}
Also used: OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume), OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket), OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream), OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails), LinkedHashMap(java.util.LinkedHashMap), HashMap(java.util.HashMap), BucketArgs(org.apache.hadoop.ozone.client.BucketArgs), OMMetadataManager(org.apache.hadoop.ozone.om.OMMetadataManager), OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream), RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo), Test(org.junit.Test)

Aggregations

Classes used together with OzoneInputStream in the indexed examples, with usage counts:

OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream): 47
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream): 33
OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket): 26
Test (org.junit.Test): 26
OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume): 22
OzoneKey (org.apache.hadoop.ozone.client.OzoneKey): 17
IOException (java.io.IOException): 15
OzoneKeyDetails (org.apache.hadoop.ozone.client.OzoneKeyDetails): 13
Instant (java.time.Instant): 12
HashMap (java.util.HashMap): 11
LinkedHashMap (java.util.LinkedHashMap): 10
HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService): 8
ArrayList (java.util.ArrayList): 7
OMException (org.apache.hadoop.ozone.om.exceptions.OMException): 7
OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs): 7
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo): 7
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo): 6
RepeatedOmKeyInfo (org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo): 6
File (java.io.File): 5
HttpHeaders (javax.ws.rs.core.HttpHeaders): 5