
Example 11 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.

The class TestOzoneRpcClientAbstract, method testGetKeyDetails.

@Test
public void testGetKeyDetails() throws IOException {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName = UUID.randomUUID().toString();
    String keyValue = RandomStringUtils.random(128);
    // Create the initial key with size 0; the write will allocate the first block.
    OzoneOutputStream out = bucket.createKey(keyName, keyValue.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
    out.write(keyValue.getBytes(UTF_8));
    out.close();
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] fileContent = new byte[32];
    is.read(fileContent);
    // First, confirm the key info from the client matches the info in OM.
    OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
    builder.setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setRefreshPipeline(true);
    OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build()).getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0);
    long containerID = keyInfo.getContainerID();
    long localID = keyInfo.getLocalID();
    OzoneKeyDetails keyDetails = (OzoneKeyDetails) bucket.getKey(keyName);
    Assert.assertEquals(keyName, keyDetails.getName());
    List<OzoneKeyLocation> keyLocations = keyDetails.getOzoneKeyLocations();
    Assert.assertEquals(1, keyLocations.size());
    Assert.assertEquals(containerID, keyLocations.get(0).getContainerID());
    Assert.assertEquals(localID, keyLocations.get(0).getLocalID());
    // Make sure that the data size matches.
    Assert.assertEquals(keyValue.getBytes(UTF_8).length, keyLocations.get(0).getLength());
    // Second, sum the chunk sizes stored in the container (looked up via
    // containerID and localID) and make sure the total equals the size
    // reported by keyDetails.
    ContainerInfo container = cluster.getStorageContainerManager().getContainerManager().getContainer(ContainerID.valueOf(containerID));
    Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager().getPipeline(container.getPipelineID());
    List<DatanodeDetails> datanodes = pipeline.getNodes();
    Assert.assertEquals(1, datanodes.size());
    DatanodeDetails datanodeDetails = datanodes.get(0);
    Assert.assertNotNull(datanodeDetails);
    HddsDatanodeService datanodeService = null;
    for (HddsDatanodeService datanodeServiceItr : cluster.getHddsDatanodes()) {
        if (datanodeDetails.equals(datanodeServiceItr.getDatanodeDetails())) {
            datanodeService = datanodeServiceItr;
            break;
        }
    }
    Assert.assertNotNull("Datanode hosting the container not found", datanodeService);
    KeyValueContainerData containerData = (KeyValueContainerData) (datanodeService.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID).getContainerData());
    try (ReferenceCountedDB db = BlockUtils.getDB(containerData, cluster.getConf());
        BlockIterator<BlockData> keyValueBlockIterator = db.getStore().getBlockIterator()) {
        while (keyValueBlockIterator.hasNext()) {
            BlockData blockData = keyValueBlockIterator.nextBlock();
            if (blockData.getBlockID().getLocalID() == localID) {
                long length = 0;
                List<ContainerProtos.ChunkInfo> chunks = blockData.getChunks();
                for (ContainerProtos.ChunkInfo chunk : chunks) {
                    length += chunk.getLen();
                }
                Assert.assertEquals(keyValue.getBytes(UTF_8).length, length);
                break;
            }
        }
    }
}
Also used : HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) OzoneKeyLocation(org.apache.hadoop.ozone.client.OzoneKeyLocation) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) ContainerInfo(org.apache.hadoop.hdds.scm.container.ContainerInfo) Test(org.junit.Test)
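
Note that the example above reads only the first 32 bytes of the 128-byte key and never closes the stream. For contrast, a minimal round-trip sketch (assuming the same store, volumeName and bucketName fixtures and the statically imported RATIS/ONE constants; the key name "demo-key" is illustrative) that writes a key and reads it back in full with try-with-resources:

byte[] payload = "hello ozone".getBytes(UTF_8);
OzoneBucket demoBucket = store.getVolume(volumeName).getBucket(bucketName);
try (OzoneOutputStream out = demoBucket.createKey("demo-key", payload.length, RATIS, ONE, new HashMap<>())) {
    out.write(payload);
}
byte[] readBack = new byte[payload.length];
try (OzoneInputStream in = demoBucket.readKey("demo-key")) {
    // read(byte[]) may return fewer bytes than requested, so check the count.
    Assert.assertEquals(payload.length, in.read(readBack));
}
Assert.assertArrayEquals(payload, readBack);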

Example 12 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.

The class TestOzoneRpcClientAbstract, method testReadKeyWithCorruptedDataWithMutiNodes.

/**
 * Tests that reading a corrupted chunk file throws a checksum exception.
 * @throws IOException
 */
@Test
public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String value = "sample value";
    byte[] data = value.getBytes(UTF_8);
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName = UUID.randomUUID().toString();
    // Write data into a key
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    // We need to find the location of the chunk file corresponding to the
    // data we just wrote.
    OzoneKey key = bucket.getKey(keyName);
    List<OzoneKeyLocation> keyLocation = ((OzoneKeyDetails) key).getOzoneKeyLocations();
    Assert.assertTrue("Key location not found in OM", !keyLocation.isEmpty());
    long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0).getContainerID();
    // Get the container by traversing the datanodes.
    List<Container> containerList = new ArrayList<>();
    Container container;
    for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
        container = hddsDatanode.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID);
        if (container != null) {
            containerList.add(container);
            if (containerList.size() == 3) {
                break;
            }
        }
    }
    Assert.assertTrue("Container not found", !containerList.isEmpty());
    corruptData(containerList.get(0), key);
    // failover to next replica
    try {
        OzoneInputStream is = bucket.readKey(keyName);
        byte[] b = new byte[data.length];
        is.read(b);
        Assert.assertArrayEquals(data, b);
    } catch (OzoneChecksumException e) {
        fail("Reading corrupted data should not fail.");
    }
    corruptData(containerList.get(1), key);
    // failover to next replica
    try {
        OzoneInputStream is = bucket.readKey(keyName);
        byte[] b = new byte[data.length];
        is.read(b);
        Assert.assertArrayEquals(data, b);
    } catch (OzoneChecksumException e) {
        fail("Reading corrupted data should not fail.");
    }
    corruptData(containerList.get(2), key);
    // Try reading the key. The read will fail here as all the replicas are corrupt.
    try {
        OzoneInputStream is = bucket.readKey(keyName);
        byte[] b = new byte[data.length];
        is.read(b);
        fail("Reading corrupted data should fail.");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("Checksum mismatch", e);
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) OzoneKeyLocation(org.apache.hadoop.ozone.client.OzoneKeyLocation) ArrayList(java.util.ArrayList) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) IOException(java.io.IOException) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) OzoneChecksumException(org.apache.hadoop.ozone.common.OzoneChecksumException) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) Test(org.junit.Test)
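
A condensed sketch of the two outcomes this test asserts, reusing the bucket, keyName and data names from the example: while at least one replica is healthy the client fails over transparently, and only when every replica is corrupt does the checksum error reach the caller.

// One or two corrupt replicas: the read fails over and still succeeds.
try (OzoneInputStream in = bucket.readKey(keyName)) {
    byte[] b = new byte[data.length];
    in.read(b);
    Assert.assertArrayEquals(data, b);
}
// All replicas corrupt: the mismatch surfaces as an IOException.
try (OzoneInputStream in = bucket.readKey(keyName)) {
    in.read(new byte[data.length]);
    fail("Expected a checksum failure");
} catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Checksum mismatch", e);
}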

Example 13 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.

The class TestOzoneRpcClientAbstract, method testCommitPartAfterCompleteUpload.

@Test
public void testCommitPartAfterCompleteUpload() throws Exception {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String keyName = UUID.randomUUID().toString();
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, RATIS, ONE);
    Assert.assertNotNull(omMultipartInfo.getUploadID());
    String uploadID = omMultipartInfo.getUploadID();
    // upload part 1.
    byte[] data = generateData(5 * 1024 * 1024, (byte) RandomUtils.nextLong());
    OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 1, uploadID);
    ozoneOutputStream.write(data, 0, data.length);
    ozoneOutputStream.close();
    OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo = ozoneOutputStream.getCommitUploadPartInfo();
    // Do not close output stream for part 2.
    ozoneOutputStream = bucket.createMultipartKey(keyName, data.length, 2, omMultipartInfo.getUploadID());
    ozoneOutputStream.write(data, 0, data.length);
    Map<Integer, String> partsMap = new LinkedHashMap<>();
    partsMap.put(1, omMultipartCommitUploadPartInfo.getPartName());
    OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket.completeMultipartUpload(keyName, uploadID, partsMap);
    Assert.assertNotNull(omMultipartCommitUploadPartInfo);
    byte[] fileContent = new byte[data.length];
    OzoneInputStream inputStream = bucket.readKey(keyName);
    inputStream.read(fileContent);
    StringBuilder sb = new StringBuilder(data.length);
    // Combine all part data and check that it matches the data read from the key.
    String part1 = new String(data, UTF_8);
    sb.append(part1);
    Assert.assertEquals(sb.toString(), new String(fileContent, UTF_8));
    try {
        ozoneOutputStream.close();
        fail("testCommitPartAfterCompleteUpload failed");
    } catch (IOException ex) {
        assertTrue(ex instanceof OMException);
        assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ((OMException) ex).getResult());
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) IOException(java.io.IOException) LinkedHashMap(java.util.LinkedHashMap) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) OmMultipartCommitUploadPartInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo) OmMultipartUploadCompleteInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo) OmMultipartInfo(org.apache.hadoop.ozone.om.helpers.OmMultipartInfo) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) Test(org.junit.Test)
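
For comparison with the failure case above, a happy-path sketch of the same multipart API (reusing bucket, keyName and data from the example), in which each part stream is closed, and thereby committed, before the upload is completed:

OmMultipartInfo info = bucket.initiateMultipartUpload(keyName, RATIS, ONE);
Map<Integer, String> parts = new LinkedHashMap<>();
OzoneOutputStream part1 = bucket.createMultipartKey(keyName, data.length, 1, info.getUploadID());
part1.write(data, 0, data.length);
// Closing the stream commits the part; only then is its part name available.
part1.close();
parts.put(1, part1.getCommitUploadPartInfo().getPartName());
bucket.completeMultipartUpload(keyName, info.getUploadID(), parts);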

Example 14 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.

The class TestOzoneRpcClientAbstract, method doMultipartUpload.

private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val) throws Exception {
    // Initiate Multipart upload request
    String uploadID = initiateMultipartUpload(bucket, keyName, ReplicationType.RATIS, THREE);
    // Upload parts
    Map<Integer, String> partsMap = new TreeMap<>();
    // Generate 5 MB of data: each part must be at least 5 MB, except the
    // last part, which may be smaller.
    int length = 0;
    byte[] data = generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, val);
    String partName = uploadPart(bucket, keyName, uploadID, 1, data);
    partsMap.put(1, partName);
    length += data.length;
    partName = uploadPart(bucket, keyName, uploadID, 2, data);
    partsMap.put(2, partName);
    length += data.length;
    String part3 = UUID.randomUUID().toString();
    partName = uploadPart(bucket, keyName, uploadID, 3, part3.getBytes(UTF_8));
    partsMap.put(3, partName);
    length += part3.getBytes(UTF_8).length;
    // Complete multipart upload request
    completeMultipartUpload(bucket, keyName, uploadID, partsMap);
    // Now read back the key whose multipart upload has completed.
    byte[] fileContent = new byte[data.length + data.length + part3.getBytes(UTF_8).length];
    OzoneInputStream inputStream = bucket.readKey(keyName);
    inputStream.read(fileContent);
    Assert.assertTrue(verifyRatisReplication(bucket.getVolumeName(), bucket.getName(), keyName, ReplicationType.RATIS, THREE));
    StringBuilder sb = new StringBuilder(length);
    // Combine all part data and check that it matches the data read from the key.
    String part1 = new String(data, UTF_8);
    String part2 = new String(data, UTF_8);
    sb.append(part1);
    sb.append(part2);
    sb.append(part3);
    Assert.assertEquals(sb.toString(), new String(fileContent, UTF_8));
    String ozoneKey = ozoneManager.getMetadataManager().getOzoneKey(bucket.getVolumeName(), bucket.getName(), keyName);
    OmKeyInfo omKeyInfo = ozoneManager.getMetadataManager().getKeyTable(getBucketLayout()).get(ozoneKey);
    OmKeyLocationInfoGroup latestVersionLocations = omKeyInfo.getLatestVersionLocations();
    Assert.assertTrue(latestVersionLocations.isMultipartKey());
    latestVersionLocations.getBlocksLatestVersionOnly().forEach(omKeyLocationInfo -> Assert.assertTrue(omKeyLocationInfo.getPartNumber() != -1));
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) OmKeyLocationInfoGroup(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) RepeatedOmKeyInfo(org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) TreeMap(java.util.TreeMap)
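
Examples 13 and 14 both fill their result buffers with a single read(fileContent) call, but InputStream.read may legally return fewer bytes than requested. A small hypothetical helper like this (java.io.EOFException import assumed) keeps such content comparisons robust:

private static void readFully(OzoneInputStream in, byte[] buf) throws IOException {
    int off = 0;
    while (off < buf.length) {
        int n = in.read(buf, off, buf.length - off);
        if (n < 0) {
            throw new EOFException("Stream ended after " + off + " of " + buf.length + " bytes");
        }
        off += n;
    }
}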

Example 15 with OzoneInputStream

Use of org.apache.hadoop.ozone.client.io.OzoneInputStream in project ozone by apache.

The class TestOzoneAtRestEncryption, method createAndVerifyKeyData.

private void createAndVerifyKeyData(OzoneBucket bucket) throws Exception {
    Instant testStartTime = Instant.now();
    String keyName = UUID.randomUUID().toString();
    String value = "sample value";
    try (OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(StandardCharsets.UTF_8).length, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>())) {
        out.write(value.getBytes(StandardCharsets.UTF_8));
    }
    // Verify content.
    OzoneKeyDetails key = bucket.getKey(keyName);
    Assert.assertEquals(keyName, key.getName());
    // Check that the file encryption info is set; the key uses it to
    // encrypt its data.
    Assert.assertNotNull(key.getFileEncryptionInfo());
    byte[] fileContent;
    int len = 0;
    try (OzoneInputStream is = bucket.readKey(keyName)) {
        fileContent = new byte[value.getBytes(StandardCharsets.UTF_8).length];
        len = is.read(fileContent);
    }
    Assert.assertEquals(value.length(), len);
    Assert.assertTrue(verifyRatisReplication(bucket.getVolumeName(), bucket.getName(), keyName, ReplicationType.RATIS, ReplicationFactor.ONE));
    Assert.assertEquals(value, new String(fileContent, StandardCharsets.UTF_8));
    Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
    Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) Instant(java.time.Instant) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream)
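
Encryption at rest is transparent to readers: the same readKey() path returns plaintext. A hedged sketch inspecting the metadata asserted above; getCipherSuite() and getKeyName() are methods of Hadoop's org.apache.hadoop.fs.FileEncryptionInfo:

FileEncryptionInfo encInfo = bucket.getKey(keyName).getFileEncryptionInfo();
if (encInfo != null) {
    // The bucket is encrypted; reads decrypt with this key automatically.
    System.out.println("cipher=" + encInfo.getCipherSuite() + ", EZ key=" + encInfo.getKeyName());
}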

Aggregations

OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream): 47 uses
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream): 33 uses
OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket): 26 uses
Test (org.junit.Test): 26 uses
OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume): 22 uses
OzoneKey (org.apache.hadoop.ozone.client.OzoneKey): 17 uses
IOException (java.io.IOException): 15 uses
OzoneKeyDetails (org.apache.hadoop.ozone.client.OzoneKeyDetails): 13 uses
Instant (java.time.Instant): 12 uses
HashMap (java.util.HashMap): 11 uses
LinkedHashMap (java.util.LinkedHashMap): 10 uses
HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService): 8 uses
ArrayList (java.util.ArrayList): 7 uses
OMException (org.apache.hadoop.ozone.om.exceptions.OMException): 7 uses
OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs): 7 uses
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo): 7 uses
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo): 6 uses
RepeatedOmKeyInfo (org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo): 6 uses
File (java.io.File): 5 uses
HttpHeaders (javax.ws.rs.core.HttpHeaders): 5 uses