Example 1 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.

The class RpcClient, method createInputStream:

private OzoneInputStream createInputStream(OmKeyInfo keyInfo, Function<OmKeyInfo, OmKeyInfo> retryFunction) throws IOException {
    // When Key is not MPU or when Key is MPU and encryption is not enabled
    // Need to revisit for GDP.
    FileEncryptionInfo feInfo = keyInfo.getFileEncryptionInfo();
    if (feInfo == null) {
        LengthInputStream lengthInputStream = KeyInputStream.getFromOmKeyInfo(keyInfo, xceiverClientManager, clientConfig.isChecksumVerify(), retryFunction);
        try {
            Map<String, String> keyInfoMetadata = keyInfo.getMetadata();
            if (Boolean.valueOf(keyInfoMetadata.get(OzoneConsts.GDPR_FLAG))) {
                GDPRSymmetricKey gk = new GDPRSymmetricKey(keyInfoMetadata.get(OzoneConsts.GDPR_SECRET), keyInfoMetadata.get(OzoneConsts.GDPR_ALGORITHM));
                gk.getCipher().init(Cipher.DECRYPT_MODE, gk.getSecretKey());
                return new OzoneInputStream(new CipherInputStream(lengthInputStream, gk.getCipher()));
            }
        } catch (Exception ex) {
            throw new IOException(ex);
        }
        return new OzoneInputStream(lengthInputStream.getWrappedStream());
    } else if (!keyInfo.getLatestVersionLocations().isMultipartKey()) {
        // Regular Key with FileEncryptionInfo
        LengthInputStream lengthInputStream = KeyInputStream.getFromOmKeyInfo(keyInfo, xceiverClientManager, clientConfig.isChecksumVerify(), retryFunction);
        final KeyProvider.KeyVersion decrypted = getDEK(feInfo);
        final CryptoInputStream cryptoIn = new CryptoInputStream(lengthInputStream.getWrappedStream(), OzoneKMSUtil.getCryptoCodec(conf, feInfo), decrypted.getMaterial(), feInfo.getIV());
        return new OzoneInputStream(cryptoIn);
    } else {
        // Multipart Key with FileEncryptionInfo
        List<LengthInputStream> lengthInputStreams = KeyInputStream.getStreamsFromKeyInfo(keyInfo, xceiverClientManager, clientConfig.isChecksumVerify(), retryFunction);
        final KeyProvider.KeyVersion decrypted = getDEK(feInfo);
        List<OzoneCryptoInputStream> cryptoInputStreams = new ArrayList<>();
        for (LengthInputStream lengthInputStream : lengthInputStreams) {
            final OzoneCryptoInputStream ozoneCryptoInputStream = new OzoneCryptoInputStream(lengthInputStream, OzoneKMSUtil.getCryptoCodec(conf, feInfo), decrypted.getMaterial(), feInfo.getIV());
            cryptoInputStreams.add(ozoneCryptoInputStream);
        }
        return new MultipartCryptoKeyInputStream(keyInfo.getKeyName(), cryptoInputStreams);
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) GDPRSymmetricKey(org.apache.hadoop.ozone.security.GDPRSymmetricKey) CipherInputStream(javax.crypto.CipherInputStream) IOException(java.io.IOException) FileEncryptionInfo(org.apache.hadoop.fs.FileEncryptionInfo) InvalidKeyException(java.security.InvalidKeyException) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) OzoneCryptoInputStream(org.apache.hadoop.ozone.client.io.OzoneCryptoInputStream) CryptoInputStream(org.apache.hadoop.crypto.CryptoInputStream) LengthInputStream(org.apache.hadoop.ozone.client.io.LengthInputStream) MultipartCryptoKeyInputStream(org.apache.hadoop.ozone.client.io.MultipartCryptoKeyInputStream) ArrayList(java.util.ArrayList) OmMultipartUploadCompleteList(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList) OzoneMultipartUploadList(org.apache.hadoop.ozone.client.OzoneMultipartUploadList) List(java.util.List) OmMultipartUploadList(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList)
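
A minimal client-side sketch of how the GDPR branch above is reached, assuming the key lives in a bucket created with the GDPR_FLAG metadata; the volume, bucket, and key names below are hypothetical, and conf stands for the client's OzoneConfiguration. Decryption is transparent to the caller because readKey returns the OzoneInputStream that already wraps the CipherInputStream built above.

// Hypothetical names; reads a key whose metadata carries the GDPR flag.
try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
    ObjectStore store = client.getObjectStore();
    OzoneBucket bucket = store.getVolume("vol1").getBucket("gdpr-bucket");
    try (OzoneInputStream in = bucket.readKey("key1")) {
        byte[] buf = new byte[1024];
        int n = in.read(buf);
        if (n > 0) {
            // The bytes returned here are already decrypted plaintext.
            System.out.println(new String(buf, 0, n, UTF_8));
        }
    }
}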

Example 2 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.

The class RpcClient, method getKeysEveryReplicas:

@Override
public Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> getKeysEveryReplicas(String volumeName, String bucketName, String keyName) throws IOException {
    Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> result = new LinkedHashMap<>();
    verifyVolumeName(volumeName);
    verifyBucketName(bucketName);
    Preconditions.checkNotNull(keyName);
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
            .setVolumeName(volumeName)
            .setBucketName(bucketName)
            .setKeyName(keyName)
            .setRefreshPipeline(true)
            .setSortDatanodesInPipeline(topologyAwareReadEnabled)
            .build();
    OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
    List<OmKeyLocationInfo> keyLocationInfos = keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
    for (OmKeyLocationInfo keyLocationInfo : keyLocationInfos) {
        Map<DatanodeDetails, OzoneInputStream> blocks = new HashMap<>();
        Pipeline pipelineBefore = keyLocationInfo.getPipeline();
        List<DatanodeDetails> datanodes = pipelineBefore.getNodes();
        for (DatanodeDetails dn : datanodes) {
            List<DatanodeDetails> nodes = new ArrayList<>();
            nodes.add(dn);
            Pipeline pipeline = new Pipeline.Builder(pipelineBefore).setNodes(nodes).setId(PipelineID.randomId()).build();
            keyLocationInfo.setPipeline(pipeline);
            List<OmKeyLocationInfo> keyLocationInfoList = new ArrayList<>();
            keyLocationInfoList.add(keyLocationInfo);
            OmKeyLocationInfoGroup keyLocationInfoGroup = new OmKeyLocationInfoGroup(0, keyLocationInfoList);
            List<OmKeyLocationInfoGroup> keyLocationInfoGroups = new ArrayList<>();
            keyLocationInfoGroups.add(keyLocationInfoGroup);
            keyInfo.setKeyLocationVersions(keyLocationInfoGroups);
            OzoneInputStream is = createInputStream(keyInfo, Function.identity());
            blocks.put(dn, is);
        }
        result.put(keyLocationInfo, blocks);
    }
    return result;
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) CacheBuilder(com.google.common.cache.CacheBuilder) ArrayList(java.util.ArrayList) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) OmKeyLocationInfoGroup(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) Map(java.util.Map)
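
A hedged sketch of how a caller might consume the map this method returns, reading each replica of each block once; rpcClient is a placeholder for the ClientProtocol instance the method is defined on, and the volume, bucket, and key names are hypothetical.

// Read every replica of every block of one key and report its readable length.
Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas =
        rpcClient.getKeysEveryReplicas("vol1", "bucket1", "key1");
for (Map.Entry<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> block
        : replicas.entrySet()) {
    for (Map.Entry<DatanodeDetails, OzoneInputStream> replica
            : block.getValue().entrySet()) {
        try (OzoneInputStream in = replica.getValue()) {
            long bytes = 0;
            byte[] buf = new byte[4096];
            int n;
            while ((n = in.read(buf)) > 0) {
                bytes += n;
            }
            System.out.println(block.getKey().getBlockID() + " on "
                    + replica.getKey().getHostName() + ": " + bytes + " bytes");
        }
    }
}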

Example 3 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.

The class TestOzoneClient, method testPutKeyRatisOneNode:

@Test
public void testPutKeyRatisOneNode() throws IOException {
    Instant testStartTime = Instant.now();
    String value = "sample value";
    OzoneBucket bucket = getOzoneBucket();
    for (int i = 0; i < 10; i++) {
        String keyName = UUID.randomUUID().toString();
        OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, ONE, new HashMap<>());
        out.write(value.getBytes(UTF_8));
        out.close();
        OzoneKey key = bucket.getKey(keyName);
        Assert.assertEquals(keyName, key.getName());
        OzoneInputStream is = bucket.readKey(keyName);
        byte[] fileContent = new byte[value.getBytes(UTF_8).length];
        Assert.assertEquals(value.length(), is.read(fileContent));
        is.close();
        Assert.assertEquals(value, new String(fileContent, UTF_8));
        Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
        Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) Instant(java.time.Instant) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) Test(org.junit.Test)
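
getOzoneBucket() is a helper of the test class whose body is not shown in this excerpt. A plausible sketch under that assumption, provisioning a fresh volume and bucket through the ObjectStore; the field name client and the name prefixes are hypothetical.

// Plausible sketch only; the real helper may differ.
private OzoneBucket getOzoneBucket() throws IOException {
    String volumeName = "vol-" + UUID.randomUUID();
    String bucketName = "bucket-" + UUID.randomUUID();
    ObjectStore store = client.getObjectStore();
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    return volume.getBucket(bucketName);
}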

Example 4 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.

The class TestStorageContainerManagerHA, method testPutKey:

public void testPutKey() throws Exception {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    Instant testStartTime = Instant.now();
    ObjectStore store = OzoneClientFactory.getRpcClient(cluster.getConf()).getObjectStore();
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName = UUID.randomUUID().toString();
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    OzoneKey key = bucket.getKey(keyName);
    Assert.assertEquals(keyName, key.getName());
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
    is.read(fileContent);
    Assert.assertEquals(value, new String(fileContent, UTF_8));
    Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
    Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
    is.close();
    final OmKeyArgs keyArgs = new OmKeyArgs.Builder()
            .setVolumeName(volumeName)
            .setBucketName(bucketName)
            .setReplicationConfig(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
            .setKeyName(keyName)
            .setRefreshPipeline(true)
            .build();
    final OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
    final List<OmKeyLocationInfo> keyLocationInfos = keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
    long index = -1;
    for (StorageContainerManager scm : cluster.getStorageContainerManagers()) {
        if (scm.checkLeader()) {
            index = getLastAppliedIndex(scm);
        }
    }
    Assert.assertFalse(index == -1);
    long finalIndex = index;
    // Ensure all follower scms have caught up with the leader
    GenericTestUtils.waitFor(() -> areAllScmInSync(finalIndex), 100, 10000);
    final long containerID = keyLocationInfos.get(0).getContainerID();
    for (int k = 0; k < numOfSCMs; k++) {
        StorageContainerManager scm = cluster.getStorageContainerManagers().get(k);
        // flush to DB on each SCM
        ((SCMRatisServerImpl) scm.getScmHAManager().getRatisServer()).getStateMachine().takeSnapshot();
        Assert.assertTrue(scm.getContainerManager().containerExist(ContainerID.valueOf(containerID)));
        Assert.assertNotNull(scm.getScmMetadataStore().getContainerTable().get(ContainerID.valueOf(containerID)));
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) ObjectStore(org.apache.hadoop.ozone.client.ObjectStore) StorageContainerManager(org.apache.hadoop.hdds.scm.server.StorageContainerManager) Instant(java.time.Instant) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo)
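
areAllScmInSync and getLastAppliedIndex are helpers of the test class that are referenced above but not shown here. A rough sketch of what the sync check could look like, reusing getLastAppliedIndex and comparing every SCM against the leader index captured in the test; this is an assumption, not the actual implementation.

// Sketch: true once every SCM has applied at least the leader's last transaction.
private boolean areAllScmInSync(long leaderIndex) {
    for (StorageContainerManager scm : cluster.getStorageContainerManagers()) {
        if (getLastAppliedIndex(scm) < leaderIndex) {
            // This SCM is still behind the leader.
            return false;
        }
    }
    return true;
}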

Example 5 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.

The class TestOzoneManagerHA, method createKeyTest:

protected void createKeyTest(boolean checkSuccess) throws Exception {
    String userName = "user" + RandomStringUtils.randomNumeric(5);
    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
    VolumeArgs createVolumeArgs = VolumeArgs.newBuilder().setOwner(userName).setAdmin(adminName).build();
    try {
        getObjectStore().createVolume(volumeName, createVolumeArgs);
        OzoneVolume retVolumeinfo = getObjectStore().getVolume(volumeName);
        Assert.assertTrue(retVolumeinfo.getName().equals(volumeName));
        Assert.assertTrue(retVolumeinfo.getOwner().equals(userName));
        Assert.assertTrue(retVolumeinfo.getAdmin().equals(adminName));
        String bucketName = UUID.randomUUID().toString();
        String keyName = UUID.randomUUID().toString();
        retVolumeinfo.createBucket(bucketName);
        OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName);
        Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
        Assert.assertTrue(ozoneBucket.getVolumeName().equals(volumeName));
        String value = "random data";
        OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName, value.length(), ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
        ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length());
        ozoneOutputStream.close();
        OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName);
        byte[] fileContent = new byte[value.getBytes(UTF_8).length];
        ozoneInputStream.read(fileContent);
        Assert.assertEquals(value, new String(fileContent, UTF_8));
    } catch (ConnectException | RemoteException e) {
        if (!checkSuccess) {
            // When checkSuccess is false, a failure is expected here: the last
            // running OM cannot get a quorum, so the request is rejected.
            if (e instanceof RemoteException) {
                GenericTestUtils.assertExceptionContains("OMNotLeaderException", e);
            }
        } else {
            throw e;
        }
    }
}
Also used : OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) VolumeArgs(org.apache.hadoop.ozone.client.VolumeArgs) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) RemoteException(org.apache.hadoop.ipc.RemoteException) ConnectException(java.net.ConnectException)
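
Note that this test, like testPutKey above, discards the return value of InputStream.read, which may legally return fewer bytes than the buffer length. A small defensive sketch that reads until the buffer is full or the stream ends:

// Read the expected number of bytes even if read() returns short counts.
byte[] fileContent = new byte[value.getBytes(UTF_8).length];
int off = 0;
while (off < fileContent.length) {
    int n = ozoneInputStream.read(fileContent, off, fileContent.length - off);
    if (n < 0) {
        break;  // unexpected end of stream
    }
    off += n;
}
Assert.assertEquals(fileContent.length, off);
Assert.assertEquals(value, new String(fileContent, UTF_8));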

Aggregations

OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream): 47
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream): 33
OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket): 26
Test (org.junit.Test): 26
OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume): 22
OzoneKey (org.apache.hadoop.ozone.client.OzoneKey): 17
IOException (java.io.IOException): 15
OzoneKeyDetails (org.apache.hadoop.ozone.client.OzoneKeyDetails): 13
Instant (java.time.Instant): 12
HashMap (java.util.HashMap): 11
LinkedHashMap (java.util.LinkedHashMap): 10
HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService): 8
ArrayList (java.util.ArrayList): 7
OMException (org.apache.hadoop.ozone.om.exceptions.OMException): 7
OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs): 7
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo): 7
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo): 6
RepeatedOmKeyInfo (org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo): 6
File (java.io.File): 5
HttpHeaders (javax.ws.rs.core.HttpHeaders): 5