Search in sources :

Example 1 with OzoneOutputStream

Use of org.apache.hadoop.ozone.client.io.OzoneOutputStream in the Apache Ozone project.

From the class RpcClient, method createMultipartKey.

@Override
public OzoneOutputStream createMultipartKey(String volumeName, String bucketName, String keyName, long size, int partNumber, String uploadID) throws IOException {
    // Validate all naming and size constraints locally before any RPC.
    verifyVolumeName(volumeName);
    verifyBucketName(bucketName);
    if (checkKeyNameEnabled) {
        HddsClientUtils.verifyKeyName(keyName);
    }
    HddsClientUtils.checkNotNull(keyName, uploadID);
    Preconditions.checkArgument(partNumber > 0 && partNumber <= 10000, "Part number should be greater than zero and less than or equal to 10000");
    Preconditions.checkArgument(size >= 0, "size should be greater than or equal to zero");
    // Describe the part as a multipart key so OM ties it to this uploadID.
    final OmKeyArgs args = new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setKeyName(keyName)
        .setDataSize(size)
        .setIsMultipartKey(true)
        .setMultipartUploadID(uploadID)
        .setMultipartUploadPartNumber(partNumber)
        .setAcls(getAclList())
        .build();
    final OpenKeySession session = ozoneManagerClient.openKey(args);
    final String traceId = UUID.randomUUID().toString();
    final KeyOutputStream partStream = new KeyOutputStream.Builder()
        .setHandler(session)
        .setXceiverClientManager(xceiverClientManager)
        .setOmClient(ozoneManagerClient)
        .setRequestID(traceId)
        .setReplicationConfig(session.getKeyInfo().getReplicationConfig())
        .setMultipartNumber(partNumber)
        .setMultipartUploadID(uploadID)
        .setIsMultipartKey(true)
        .enableUnsafeByteBufferConversion(unsafeByteBufferConversion)
        .setConfig(clientConfig)
        .build();
    partStream.addPreallocateBlocks(session.getKeyInfo().getLatestVersionLocations(), session.getOpenVersion());
    // When the bucket is encrypted, OM returns encryption info; wrap the
    // stream in a CryptoOutputStream keyed with the decrypted DEK.
    final FileEncryptionInfo feInfo = session.getKeyInfo().getFileEncryptionInfo();
    if (feInfo == null) {
        return new OzoneOutputStream(partStream);
    }
    final KeyProvider.KeyVersion dek = getDEK(feInfo);
    final CryptoOutputStream encrypted = new CryptoOutputStream(partStream, OzoneKMSUtil.getCryptoCodec(conf, feInfo), dek.getMaterial(), feInfo.getIV());
    return new OzoneOutputStream(encrypted);
}
Also used : KeyProvider(org.apache.hadoop.crypto.key.KeyProvider) CryptoOutputStream(org.apache.hadoop.crypto.CryptoOutputStream) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OpenKeySession(org.apache.hadoop.ozone.om.helpers.OpenKeySession) KeyOutputStream(org.apache.hadoop.ozone.client.io.KeyOutputStream) FileEncryptionInfo(org.apache.hadoop.fs.FileEncryptionInfo) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs)

Example 2 with OzoneOutputStream

use of org.apache.hadoop.ozone.client.io.OzoneOutputStream in project ozone by apache.

the class TestOzoneClient method testPutKeyRatisOneNode.

@Test
public void testPutKeyRatisOneNode() throws IOException {
    Instant testStartTime = Instant.now();
    String value = "sample value";
    OzoneBucket bucket = getOzoneBucket();
    for (int i = 0; i < 10; i++) {
        String keyName = UUID.randomUUID().toString();
        // try-with-resources: previously the streams leaked whenever an
        // assertion failed between open and close.
        try (OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, ONE, new HashMap<>())) {
            out.write(value.getBytes(UTF_8));
        }
        OzoneKey key = bucket.getKey(keyName);
        Assert.assertEquals(keyName, key.getName());
        byte[] fileContent = new byte[value.getBytes(UTF_8).length];
        try (OzoneInputStream is = bucket.readKey(keyName)) {
            // The read count must cover the whole payload.
            Assert.assertEquals(value.length(), is.read(fileContent));
        }
        Assert.assertEquals(value, new String(fileContent, UTF_8));
        // Creation/modification times must not predate the test start.
        Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
        Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) Instant(java.time.Instant) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) Test(org.junit.Test)

Example 3 with OzoneOutputStream

use of org.apache.hadoop.ozone.client.io.OzoneOutputStream in project ozone by apache.

the class TestReconWithOzoneManagerHA method testReconGetsSnapshotFromLeader.

@Test
public void testReconGetsSnapshotFromLeader() throws Exception {
    AtomicReference<OzoneManager> ozoneManager = new AtomicReference<>();
    // Wait for OM leader election to finish
    GenericTestUtils.waitFor(() -> {
        OzoneManager om = cluster.getOMLeader();
        ozoneManager.set(om);
        return om != null;
    }, 100, 120000);
    // FIX: assert on the referenced OM, not on the AtomicReference itself —
    // the reference object is never null, so the old check was vacuous.
    Assert.assertNotNull("Timed out waiting OM leader election to finish: " + "no leader or more than one leader.", ozoneManager.get());
    Assert.assertTrue("Should have gotten the leader!", ozoneManager.get().isLeaderReady());
    OzoneManagerServiceProviderImpl impl = (OzoneManagerServiceProviderImpl) cluster.getReconServer().getOzoneManagerServiceProvider();
    // Recon must request the snapshot from the elected leader's HTTP endpoint.
    String hostname = ozoneManager.get().getHttpServer().getHttpAddress().getHostName();
    String expectedUrl = "http://" + (hostname.equals("0.0.0.0") ? "localhost" : hostname) + ":" + ozoneManager.get().getHttpServer().getHttpAddress().getPort() + OZONE_DB_CHECKPOINT_HTTP_ENDPOINT;
    String snapshotUrl = impl.getOzoneManagerSnapshotUrl();
    Assert.assertEquals("OM Snapshot should be requested from the leader.", expectedUrl, snapshotUrl);
    // Write some data
    String keyPrefix = "ratis";
    // try-with-resources so the stream is closed even if write/flush throws.
    try (OzoneOutputStream key = objectStore.getVolume(VOL_NAME).getBucket(VOL_NAME).createKey(keyPrefix, 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>())) {
        key.write(keyPrefix.getBytes(UTF_8));
        key.flush();
    }
    // Sync data to Recon
    impl.syncDataFromOM();
    ReconContainerMetadataManager reconContainerMetadataManager = cluster.getReconServer().getReconContainerMetadataManager();
    TableIterator iterator = reconContainerMetadataManager.getContainerTableIterator();
    String reconKeyPrefix = null;
    // Scan to the last synced container key prefix.
    while (iterator.hasNext()) {
        Table.KeyValue<ContainerKeyPrefix, Integer> keyValue = (Table.KeyValue<ContainerKeyPrefix, Integer>) iterator.next();
        reconKeyPrefix = keyValue.getKey().getKeyPrefix();
    }
    Assert.assertEquals("Container data should be synced to recon.", String.format("/%s/%s/%s", VOL_NAME, VOL_NAME, keyPrefix), reconKeyPrefix);
}
Also used : Table(org.apache.hadoop.hdds.utils.db.Table) AtomicReference(java.util.concurrent.atomic.AtomicReference) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OzoneManagerServiceProviderImpl(org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl) TableIterator(org.apache.hadoop.hdds.utils.db.TableIterator) ReconContainerMetadataManager(org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager) OzoneManager(org.apache.hadoop.ozone.om.OzoneManager) ContainerKeyPrefix(org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix) Test(org.junit.Test)

Example 4 with OzoneOutputStream

use of org.apache.hadoop.ozone.client.io.OzoneOutputStream in project ozone by apache.

the class TestStorageContainerManagerHA method testPutKey.

public void testPutKey() throws Exception {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    Instant testStartTime = Instant.now();
    ObjectStore store = OzoneClientFactory.getRpcClient(cluster.getConf()).getObjectStore();
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName = UUID.randomUUID().toString();
    // try-with-resources: previously the streams leaked whenever an
    // assertion between open and close failed.
    try (OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>())) {
        out.write(value.getBytes(UTF_8));
    }
    OzoneKey key = bucket.getKey(keyName);
    Assert.assertEquals(keyName, key.getName());
    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
    try (OzoneInputStream is = bucket.readKey(keyName)) {
        // FIX: the read count was ignored; a short read would previously go
        // undetected until the content compare.
        Assert.assertEquals(fileContent.length, is.read(fileContent));
    }
    Assert.assertEquals(value, new String(fileContent, UTF_8));
    Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
    Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
    final OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setReplicationConfig(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)).setKeyName(keyName).setRefreshPipeline(true).build();
    final OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
    final List<OmKeyLocationInfo> keyLocationInfos = keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
    long index = -1;
    for (StorageContainerManager scm : cluster.getStorageContainerManagers()) {
        if (scm.checkLeader()) {
            index = getLastAppliedIndex(scm);
        }
    }
    // A leader must exist; index stays -1 only if none was found.
    Assert.assertFalse(index == -1);
    long finalIndex = index;
    // Ensure all follower scms have caught up with the leader
    GenericTestUtils.waitFor(() -> areAllScmInSync(finalIndex), 100, 10000);
    final long containerID = keyLocationInfos.get(0).getContainerID();
    for (int k = 0; k < numOfSCMs; k++) {
        StorageContainerManager scm = cluster.getStorageContainerManagers().get(k);
        // flush to DB on each SCM
        ((SCMRatisServerImpl) scm.getScmHAManager().getRatisServer()).getStateMachine().takeSnapshot();
        Assert.assertTrue(scm.getContainerManager().containerExist(ContainerID.valueOf(containerID)));
        Assert.assertNotNull(scm.getScmMetadataStore().getContainerTable().get(ContainerID.valueOf(containerID)));
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) ObjectStore(org.apache.hadoop.ozone.client.ObjectStore) StorageContainerManager(org.apache.hadoop.hdds.scm.server.StorageContainerManager) Instant(java.time.Instant) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo)

Example 5 with OzoneOutputStream

use of org.apache.hadoop.ozone.client.io.OzoneOutputStream in project ozone by apache.

the class TestOzoneManagerHA method createKeyTest.

protected void createKeyTest(boolean checkSuccess) throws Exception {
    String userName = "user" + RandomStringUtils.randomNumeric(5);
    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
    VolumeArgs createVolumeArgs = VolumeArgs.newBuilder().setOwner(userName).setAdmin(adminName).build();
    try {
        getObjectStore().createVolume(volumeName, createVolumeArgs);
        OzoneVolume retVolumeinfo = getObjectStore().getVolume(volumeName);
        // FIX: assertEquals instead of assertTrue(a.equals(b)) — on failure
        // it reports the expected and actual values.
        Assert.assertEquals(volumeName, retVolumeinfo.getName());
        Assert.assertEquals(userName, retVolumeinfo.getOwner());
        Assert.assertEquals(adminName, retVolumeinfo.getAdmin());
        String bucketName = UUID.randomUUID().toString();
        String keyName = UUID.randomUUID().toString();
        retVolumeinfo.createBucket(bucketName);
        OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName);
        Assert.assertEquals(bucketName, ozoneBucket.getName());
        Assert.assertEquals(volumeName, ozoneBucket.getVolumeName());
        String value = "random data";
        // try-with-resources so streams close even when an OM failover makes
        // a later call throw mid-test.
        try (OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName, value.length(), ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>())) {
            ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length());
        }
        byte[] fileContent = new byte[value.getBytes(UTF_8).length];
        try (OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName)) {
            // FIX: the read count was ignored; verify the full payload arrived.
            Assert.assertEquals(fileContent.length, ozoneInputStream.read(fileContent));
        }
        Assert.assertEquals(value, new String(fileContent, UTF_8));
    } catch (ConnectException | RemoteException e) {
        if (!checkSuccess) {
            // Expected when stopping the last running OM, as it would fail to
            // get a quorum.
            if (e instanceof RemoteException) {
                GenericTestUtils.assertExceptionContains("OMNotLeaderException", e);
            }
        } else {
            throw e;
        }
    }
}
Also used : OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) VolumeArgs(org.apache.hadoop.ozone.client.VolumeArgs) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) RemoteException(org.apache.hadoop.ipc.RemoteException) ConnectException(java.net.ConnectException)

Aggregations

OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream)165 Test (org.junit.Test)117 KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream)67 OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket)59 OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume)53 Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline)35 OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs)33 OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo)33 OutputStream (java.io.OutputStream)32 BlockOutputStream (org.apache.hadoop.hdds.scm.storage.BlockOutputStream)32 IOException (java.io.IOException)31 OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream)31 RatisBlockOutputStream (org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream)29 OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo)23 ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo)21 OzoneKey (org.apache.hadoop.ozone.client.OzoneKey)20 DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)19 XceiverClientRatis (org.apache.hadoop.hdds.scm.XceiverClientRatis)19 HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService)19 HashMap (java.util.HashMap)17