Example 1 with OzoneBucket

Use of org.apache.hadoop.ozone.client.OzoneBucket in project ozone by apache.

From the class RpcClient, method getBucketDetails:

@Override
public OzoneBucket getBucketDetails(String volumeName, String bucketName) throws IOException {
    verifyVolumeName(volumeName);
    verifyBucketName(bucketName);
    OmBucketInfo bucketInfo = ozoneManagerClient.getBucketInfo(volumeName, bucketName);
    return new OzoneBucket(conf, this, bucketInfo.getVolumeName(),
        bucketInfo.getBucketName(), bucketInfo.getStorageType(),
        bucketInfo.getIsVersionEnabled(), bucketInfo.getCreationTime(),
        bucketInfo.getModificationTime(), bucketInfo.getMetadata(),
        bucketInfo.getEncryptionKeyInfo() != null
            ? bucketInfo.getEncryptionKeyInfo().getKeyName() : null,
        bucketInfo.getSourceVolume(), bucketInfo.getSourceBucket(),
        bucketInfo.getUsedBytes(), bucketInfo.getUsedNamespace(),
        bucketInfo.getQuotaInBytes(), bucketInfo.getQuotaInNamespace(),
        bucketInfo.getBucketLayout(), bucketInfo.getOwner());
}
Also used: OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo), OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket)
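
For orientation, this method is usually reached through the public client API rather than by calling RpcClient directly. A minimal sketch of that path, written inside a method that declares throws IOException and using placeholder volume and bucket names, might look like this:

// Sketch only: assumes a reachable cluster and an existing volume/bucket
// with the placeholder names below.
OzoneConfiguration conf = new OzoneConfiguration();
try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
    ObjectStore store = client.getObjectStore();
    OzoneVolume volume = store.getVolume("vol1");
    // getBucket(...) goes through ClientProtocol#getBucketDetails (the RpcClient
    // implementation shown above) and returns a populated OzoneBucket handle.
    OzoneBucket bucket = volume.getBucket("bucket1");
    System.out.println(bucket.getName() + " storage type: " + bucket.getStorageType());
}

The arguments passed to the OzoneBucket constructor above (storage type, quotas, used bytes, bucket layout, owner, and so on) are what this handle then exposes through its getters.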

Example 2 with OzoneBucket

Use of org.apache.hadoop.ozone.client.OzoneBucket in project ozone by apache.

From the class TestReplicatedFileChecksumHelper, method testOneBlock:

@Test
public void testOneBlock() throws IOException {
    // test the file checksum of a file with one block.
    OzoneConfiguration conf = new OzoneConfiguration();
    RpcClient mockRpcClient = Mockito.mock(RpcClient.class);
    List<DatanodeDetails> dns = Arrays.asList(DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()).build());
    Pipeline pipeline = Pipeline.newBuilder()
        .setId(PipelineID.randomId())
        .setReplicationConfig(
            RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE))
        .setState(Pipeline.PipelineState.CLOSED)
        .setNodes(dns)
        .build();
    XceiverClientGrpc xceiverClientGrpc = new XceiverClientGrpc(pipeline, conf) {

        @Override
        public XceiverClientReply sendCommandAsync(ContainerProtos.ContainerCommandRequestProto request, DatanodeDetails dn) {
            return buildValidResponse();
        }
    };
    XceiverClientFactory factory = Mockito.mock(XceiverClientFactory.class);
    when(factory.acquireClientForReadData(ArgumentMatchers.any())).thenReturn(xceiverClientGrpc);
    when(mockRpcClient.getXceiverClientManager()).thenReturn(factory);
    OzoneManagerProtocol om = Mockito.mock(OzoneManagerProtocol.class);
    when(mockRpcClient.getOzoneManagerClient()).thenReturn(om);
    BlockID blockID = new BlockID(1, 1);
    OmKeyLocationInfo omKeyLocationInfo = new OmKeyLocationInfo.Builder().setPipeline(pipeline).setBlockID(blockID).build();
    List<OmKeyLocationInfo> omKeyLocationInfoList = Arrays.asList(omKeyLocationInfo);
    OmKeyInfo omKeyInfo = new OmKeyInfo.Builder()
        .setVolumeName(null)
        .setBucketName(null)
        .setKeyName(null)
        .setOmKeyLocationInfos(Collections.singletonList(
            new OmKeyLocationInfoGroup(0, omKeyLocationInfoList)))
        .setCreationTime(Time.now())
        .setModificationTime(Time.now())
        .setDataSize(0)
        .setReplicationConfig(
            RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
        .setFileEncryptionInfo(null)
        .setAcls(null)
        .build();
    when(om.lookupKey(ArgumentMatchers.any())).thenReturn(omKeyInfo);
    OzoneVolume mockVolume = Mockito.mock(OzoneVolume.class);
    when(mockVolume.getName()).thenReturn("vol1");
    OzoneBucket bucket = Mockito.mock(OzoneBucket.class);
    when(bucket.getName()).thenReturn("bucket1");
    ReplicatedFileChecksumHelper helper = new ReplicatedFileChecksumHelper(mockVolume, bucket, "dummy", 10, mockRpcClient);
    helper.compute();
    FileChecksum fileChecksum = helper.getFileChecksum();
    assertTrue(fileChecksum instanceof MD5MD5CRC32GzipFileChecksum);
    assertEquals(1, helper.getKeyLocationInfoList().size());
}
Also used: OzoneManagerProtocol (org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), MD5MD5CRC32GzipFileChecksum (org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum), MockXceiverClientFactory (org.apache.hadoop.ozone.client.MockXceiverClientFactory), XceiverClientFactory (org.apache.hadoop.hdds.scm.XceiverClientFactory), OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo), FileChecksum (org.apache.hadoop.fs.FileChecksum), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume), OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket), OmKeyLocationInfoGroup (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), XceiverClientGrpc (org.apache.hadoop.hdds.scm.XceiverClientGrpc), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo), BlockID (org.apache.hadoop.hdds.client.BlockID), RpcClient (org.apache.hadoop.ozone.client.rpc.RpcClient), Test (org.junit.Test)
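
For context, this checksum helper is normally exercised indirectly. With an Ozone-backed Hadoop FileSystem, a checksum request might be issued roughly as in the hedged sketch below (the ofs URI, volume, bucket, and key names are placeholders, and the returned checksum type depends on cluster and client configuration):

// Sketch only: assumes an ofs:// filesystem is reachable at the placeholder URI,
// inside a method that declares throws IOException.
OzoneConfiguration conf = new OzoneConfiguration();
FileSystem fs = FileSystem.get(URI.create("ofs://om-service-id/"), conf);
FileChecksum checksum = fs.getFileChecksum(new Path("/vol1/bucket1/key1"));
System.out.println(checksum == null ? "checksum not available" : checksum.toString());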

Example 3 with OzoneBucket

Use of org.apache.hadoop.ozone.client.OzoneBucket in project ozone by apache.

From the class TestStorageContainerManagerHA, method testPutKey:

public void testPutKey() throws Exception {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    Instant testStartTime = Instant.now();
    ObjectStore store = OzoneClientFactory.getRpcClient(cluster.getConf()).getObjectStore();
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName = UUID.randomUUID().toString();
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    OzoneKey key = bucket.getKey(keyName);
    Assert.assertEquals(keyName, key.getName());
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
    is.read(fileContent);
    Assert.assertEquals(value, new String(fileContent, UTF_8));
    Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
    Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
    is.close();
    final OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setReplicationConfig(
            RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
        .setKeyName(keyName)
        .setRefreshPipeline(true)
        .build();
    final OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
    final List<OmKeyLocationInfo> keyLocationInfos = keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
    long index = -1;
    for (StorageContainerManager scm : cluster.getStorageContainerManagers()) {
        if (scm.checkLeader()) {
            index = getLastAppliedIndex(scm);
        }
    }
    Assert.assertFalse(index == -1);
    long finalIndex = index;
    // Ensure all follower SCMs have caught up with the leader
    GenericTestUtils.waitFor(() -> areAllScmInSync(finalIndex), 100, 10000);
    final long containerID = keyLocationInfos.get(0).getContainerID();
    for (int k = 0; k < numOfSCMs; k++) {
        StorageContainerManager scm = cluster.getStorageContainerManagers().get(k);
        // flush to DB on each SCM
        ((SCMRatisServerImpl) scm.getScmHAManager().getRatisServer()).getStateMachine().takeSnapshot();
        Assert.assertTrue(scm.getContainerManager().containerExist(ContainerID.valueOf(containerID)));
        Assert.assertNotNull(scm.getScmMetadataStore().getContainerTable().get(ContainerID.valueOf(containerID)));
    }
}
Also used: OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream), ObjectStore (org.apache.hadoop.ozone.client.ObjectStore), StorageContainerManager (org.apache.hadoop.hdds.scm.server.StorageContainerManager), Instant (java.time.Instant), OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream), OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs), OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo), OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume), OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket), OzoneKey (org.apache.hadoop.ozone.client.OzoneKey), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo)

Example 4 with OzoneBucket

Use of org.apache.hadoop.ozone.client.OzoneBucket in project ozone by apache.

From the class TestOzoneManagerHA, method createKeyTest:

protected void createKeyTest(boolean checkSuccess) throws Exception {
    String userName = "user" + RandomStringUtils.randomNumeric(5);
    String adminName = "admin" + RandomStringUtils.randomNumeric(5);
    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
    VolumeArgs createVolumeArgs = VolumeArgs.newBuilder().setOwner(userName).setAdmin(adminName).build();
    try {
        getObjectStore().createVolume(volumeName, createVolumeArgs);
        OzoneVolume retVolumeinfo = getObjectStore().getVolume(volumeName);
        Assert.assertTrue(retVolumeinfo.getName().equals(volumeName));
        Assert.assertTrue(retVolumeinfo.getOwner().equals(userName));
        Assert.assertTrue(retVolumeinfo.getAdmin().equals(adminName));
        String bucketName = UUID.randomUUID().toString();
        String keyName = UUID.randomUUID().toString();
        retVolumeinfo.createBucket(bucketName);
        OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName);
        Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
        Assert.assertTrue(ozoneBucket.getVolumeName().equals(volumeName));
        String value = "random data";
        OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName,
            value.length(), ReplicationType.RATIS, ReplicationFactor.ONE,
            new HashMap<>());
        ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length());
        ozoneOutputStream.close();
        OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName);
        byte[] fileContent = new byte[value.getBytes(UTF_8).length];
        ozoneInputStream.read(fileContent);
        Assert.assertEquals(value, new String(fileContent, UTF_8));
    } catch (ConnectException | RemoteException e) {
        if (!checkSuccess) {
            // If the last OM to be tried by the RetryProxy is down, we would
            // get ConnectException. Otherwise, we would get a RemoteException
            // from the last running OM as it would fail to get a quorum.
            if (e instanceof RemoteException) {
                GenericTestUtils.assertExceptionContains("OMNotLeaderException", e);
            }
        } else {
            throw e;
        }
    }
}
Also used: OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume), OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket), OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream), VolumeArgs (org.apache.hadoop.ozone.client.VolumeArgs), OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream), RemoteException (org.apache.hadoop.ipc.RemoteException), ConnectException (java.net.ConnectException)

Example 5 with OzoneBucket

Use of org.apache.hadoop.ozone.client.OzoneBucket in project ozone by apache.

From the class TestOzoneManagerHAMetadataOnly, method testAllBucketOperations:

@Test
public void testAllBucketOperations() throws Exception {
    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
    String bucketName = "volume" + RandomStringUtils.randomNumeric(5);
    OzoneVolume retVolume = createAndCheckVolume(volumeName);
    BucketArgs bucketArgs = BucketArgs.newBuilder().setStorageType(StorageType.DISK).setVersioning(true).build();
    retVolume.createBucket(bucketName, bucketArgs);
    OzoneBucket ozoneBucket = retVolume.getBucket(bucketName);
    Assert.assertEquals(volumeName, ozoneBucket.getVolumeName());
    Assert.assertEquals(bucketName, ozoneBucket.getName());
    Assert.assertTrue(ozoneBucket.getVersioning());
    Assert.assertEquals(StorageType.DISK, ozoneBucket.getStorageType());
    Assert.assertFalse(ozoneBucket.getCreationTime().isAfter(Instant.now()));
    // Change versioning to false
    ozoneBucket.setVersioning(false);
    ozoneBucket = retVolume.getBucket(bucketName);
    Assert.assertFalse(ozoneBucket.getVersioning());
    retVolume.deleteBucket(bucketName);
    OzoneTestUtils.expectOmException(OMException.ResultCodes.BUCKET_NOT_FOUND, () -> retVolume.deleteBucket(bucketName));
}
Also used: OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume), OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket), BucketArgs (org.apache.hadoop.ozone.client.BucketArgs), Test (org.junit.Test)

Aggregations

OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket) - 241
OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume) - 166
Test (org.junit.Test) - 155
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream) - 62
OMException (org.apache.hadoop.ozone.om.exceptions.OMException) - 43
IOException (java.io.IOException) - 35
ArrayList (java.util.ArrayList) - 26
ObjectStore (org.apache.hadoop.ozone.client.ObjectStore) - 26
OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream) - 26
OzoneAcl (org.apache.hadoop.ozone.OzoneAcl) - 24
OzoneClient (org.apache.hadoop.ozone.client.OzoneClient) - 24
OFSPath (org.apache.hadoop.ozone.OFSPath) - 23
OzoneKey (org.apache.hadoop.ozone.client.OzoneKey) - 22
HashMap (java.util.HashMap) - 21
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration) - 20
BucketArgs (org.apache.hadoop.ozone.client.BucketArgs) - 20
Path (org.apache.hadoop.fs.Path) - 18
OzoneKeyDetails (org.apache.hadoop.ozone.client.OzoneKeyDetails) - 18
OmMultipartInfo (org.apache.hadoop.ozone.om.helpers.OmMultipartInfo) - 18
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo) - 15
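
Taken together, the aggregation reflects the recurring client-side pattern in the examples above: obtain an OzoneClient, walk ObjectStore to OzoneVolume to OzoneBucket, then write and read keys. A minimal end-to-end sketch of that flow, using placeholder volume, bucket, and key names, might look like this:

// Sketch only: placeholder names; assumes a reachable Ozone cluster.
public static void writeAndReadOneKey(OzoneConfiguration conf) throws IOException {
    try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
        ObjectStore store = client.getObjectStore();
        store.createVolume("vol1");
        OzoneVolume volume = store.getVolume("vol1");
        volume.createBucket("bucket1");
        OzoneBucket bucket = volume.getBucket("bucket1");

        byte[] data = "sample value".getBytes(StandardCharsets.UTF_8);
        // Write a key with RATIS/ONE replication, as in the examples above.
        try (OzoneOutputStream out = bucket.createKey("key1", data.length,
                ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>())) {
            out.write(data);
        }

        // Read the key back into a buffer sized to the written data.
        byte[] readBack = new byte[data.length];
        try (OzoneInputStream in = bucket.readKey("key1")) {
            in.read(readBack);
        }
    }
}

The imports assumed here correspond to classes listed in the aggregation: OzoneClient, ObjectStore, OzoneVolume, OzoneBucket, OzoneOutputStream, OzoneInputStream, OzoneConfiguration, and HashMap.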