Search in sources :

Example 11 with OzoneKeyDetails

use of org.apache.hadoop.ozone.client.OzoneKeyDetails in project ozone by apache.

The class TestOMRatisSnapshots, method getKeys.

/**
 * Reads every key in {@code keys} from {@code ozoneBucket} and asserts the
 * returned {@link OzoneKeyDetails} carries the requested name, repeating the
 * whole sweep {@code round} times.
 *
 * @param keys  key names expected to exist in the bucket
 * @param round number of full passes over the key list (0 means no reads)
 * @throws IOException if any lookup against the bucket fails
 */
private void getKeys(List<String> keys, int round) throws IOException {
    for (int pass = round; pass > 0; pass--) {
        for (String name : keys) {
            OzoneKeyDetails details = ozoneBucket.getKey(name);
            Assert.assertEquals(name, details.getName());
        }
    }
}
Also used : OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails)

Example 12 with OzoneKeyDetails

use of org.apache.hadoop.ozone.client.OzoneKeyDetails in project ozone by apache.

The class TestReadRetries, method testPutKeyAndGetKeyThreeNodes.

// Writes a RATIS/THREE-replicated key, cross-checks the client-visible key
// location against OM's lookupKey answer, then shuts down the pipeline's
// datanodes one at a time: reads must succeed with 1 and 2 nodes down and
// fail with IOException once all 3 are down.
// NOTE(review): relies on fields (store, ozoneManager, cluster) and helpers
// (readKey, verifyIntermediateDir) defined elsewhere in TestReadRetries.
@Test
public void testPutKeyAndGetKeyThreeNodes() throws Exception {
    // Random names so repeated runs never collide in OM.
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    // Nested key path so intermediate directories get created/verified below.
    String keyName = "a/b/c/" + UUID.randomUUID().toString();
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>());
    // Grab the client factory now; it is reused later to acquire a Ratis client
    // for the same pipeline the key was written to.
    KeyOutputStream groupOutputStream = (KeyOutputStream) out.getOutputStream();
    XceiverClientFactory factory = groupOutputStream.getXceiverClientFactory();
    out.write(value.getBytes(UTF_8));
    out.close();
    // First, confirm the key info from the client matches the info in OM.
    OmKeyArgs.Builder builder = new OmKeyArgs.Builder();
    builder.setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setRefreshPipeline(true);
    OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build()).getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0);
    long containerID = keyInfo.getContainerID();
    long localID = keyInfo.getLocalID();
    OzoneKeyDetails keyDetails = bucket.getKey(keyName);
    Assert.assertEquals(keyName, keyDetails.getName());
    // A single small write should land in exactly one block location.
    List<OzoneKeyLocation> keyLocations = keyDetails.getOzoneKeyLocations();
    Assert.assertEquals(1, keyLocations.size());
    Assert.assertEquals(containerID, keyLocations.get(0).getContainerID());
    Assert.assertEquals(localID, keyLocations.get(0).getLocalID());
    // Make sure that the data size matched.
    Assert.assertEquals(value.getBytes(UTF_8).length, keyLocations.get(0).getLength());
    // Resolve the pipeline (and its 3 datanodes) that hosts the container.
    ContainerInfo container = cluster.getStorageContainerManager().getContainerManager().getContainer(ContainerID.valueOf(containerID));
    Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager().getPipeline(container.getPipelineID());
    List<DatanodeDetails> datanodes = pipeline.getNodes();
    DatanodeDetails datanodeDetails = datanodes.get(0);
    Assert.assertNotNull(datanodeDetails);
    XceiverClientSpi clientSpi = factory.acquireClient(pipeline);
    Assert.assertTrue(clientSpi instanceof XceiverClientRatis);
    XceiverClientRatis ratisClient = (XceiverClientRatis) clientSpi;
    // Wait until the write is committed on the Ratis ring before killing nodes,
    // otherwise the surviving replicas might not yet hold the data.
    ratisClient.watchForCommit(keyInfo.getBlockCommitSequenceId());
    // shutdown the datanode
    cluster.shutdownHddsDatanode(datanodeDetails);
    // try to read, this should be successful
    readKey(bucket, keyName, value);
    // read intermediate directory
    verifyIntermediateDir(bucket, "a/b/c");
    // shutdown the second datanode
    datanodeDetails = datanodes.get(1);
    cluster.shutdownHddsDatanode(datanodeDetails);
    // we still should be able to read via Standalone protocol
    // try to read
    readKey(bucket, keyName, value);
    // shutdown the 3rd datanode
    datanodeDetails = datanodes.get(2);
    cluster.shutdownHddsDatanode(datanodeDetails);
    try {
        // try to read
        readKey(bucket, keyName, value);
        fail("Expected exception not thrown");
    } catch (IOException e) {
    // it should throw an ioException as none of the servers
    // are available
    }
    factory.releaseClient(clientSpi, false);
}
Also used : OzoneKeyLocation(org.apache.hadoop.ozone.client.OzoneKeyLocation) XceiverClientRatis(org.apache.hadoop.hdds.scm.XceiverClientRatis) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) IOException(java.io.IOException) XceiverClientFactory(org.apache.hadoop.hdds.scm.XceiverClientFactory) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) ContainerInfo(org.apache.hadoop.hdds.scm.container.ContainerInfo) KeyOutputStream(org.apache.hadoop.ozone.client.io.KeyOutputStream) Test(org.junit.Test)

Example 13 with OzoneKeyDetails

use of org.apache.hadoop.ozone.client.OzoneKeyDetails in project ozone by apache.

The class TestObjectStoreWithFSO, method testLookupKey.

// Verifies FSO key lifecycle visibility: while a key is open for writing it
// lives only in the openFileTable and getKey() must fail with KEY_NOT_FOUND;
// after close() it moves to the fileTable and becomes readable; after delete
// it disappears from the fileTable and getKey() fails again.
// NOTE(review): relies on fields (cluster, volumeName, bucketName) and helpers
// (getDirInfo, getBucketLayout, verifyKeyInFileTable, verifyKeyInOpenFileTable)
// defined elsewhere in TestObjectStoreWithFSO.
@Test
public void testLookupKey() throws Exception {
    String parent = "a/b/c/";
    String fileName = "key" + RandomStringUtils.randomNumeric(5);
    String key = parent + fileName;
    OzoneClient client = cluster.getClient();
    ObjectStore objectStore = client.getObjectStore();
    OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
    Assert.assertTrue(ozoneVolume.getName().equals(volumeName));
    OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
    Assert.assertTrue(ozoneBucket.getName().equals(bucketName));
    // Direct handle on OM's open-key table so we can assert on its contents.
    Table<String, OmKeyInfo> openFileTable = cluster.getOzoneManager().getMetadataManager().getOpenKeyTable(getBucketLayout());
    String data = "random data";
    OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(key, data.length(), ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>());
    // clientID identifies this open-key session in the openFileTable.
    KeyOutputStream keyOutputStream = (KeyOutputStream) ozoneOutputStream.getOutputStream();
    long clientID = keyOutputStream.getClientID();
    // createKey on an FSO bucket must have auto-created the parent dirs.
    OmDirectoryInfo dirPathC = getDirInfo(parent);
    Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
    // after file creation
    verifyKeyInOpenFileTable(openFileTable, clientID, fileName, dirPathC.getObjectID(), false);
    ozoneOutputStream.write(data.getBytes(StandardCharsets.UTF_8), 0, data.length());
    // open key
    try {
        ozoneBucket.getKey(key);
        fail("Should throw exception as fileName is not visible and its still " + "open for writing!");
    } catch (OMException ome) {
        // expected
        assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND);
    }
    ozoneOutputStream.close();
    // Once closed, the key is committed and visible to readers.
    OzoneKeyDetails keyDetails = ozoneBucket.getKey(key);
    Assert.assertEquals(key, keyDetails.getName());
    Table<String, OmKeyInfo> fileTable = cluster.getOzoneManager().getMetadataManager().getKeyTable(getBucketLayout());
    // When closing the key, entry should be removed from openFileTable
    // and it should be added to fileTable.
    verifyKeyInFileTable(fileTable, fileName, dirPathC.getObjectID(), false);
    verifyKeyInOpenFileTable(openFileTable, clientID, fileName, dirPathC.getObjectID(), true);
    ozoneBucket.deleteKey(key);
    // get deleted key
    try {
        ozoneBucket.getKey(key);
        fail("Should throw exception as fileName not exists!");
    } catch (OMException ome) {
        // expected
        assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND);
    }
    // after key delete
    verifyKeyInFileTable(fileTable, fileName, dirPathC.getObjectID(), true);
    verifyKeyInOpenFileTable(openFileTable, clientID, fileName, dirPathC.getObjectID(), true);
}
Also used : ObjectStore(org.apache.hadoop.ozone.client.ObjectStore) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OzoneClient(org.apache.hadoop.ozone.client.OzoneClient) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OmDirectoryInfo(org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo) KeyOutputStream(org.apache.hadoop.ozone.client.io.KeyOutputStream) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) Test(org.junit.Test)

Example 14 with OzoneKeyDetails

use of org.apache.hadoop.ozone.client.OzoneKeyDetails in project ozone by apache.

The class ObjectEndpoint, method copyObject.

/**
 * Implements the S3 CopyObject operation: streams the source key named by the
 * {@code x-amz-copy-source} header into {@code destkey} in {@code destBucket}.
 *
 * Fixes vs. previous revision:
 * - PERMISSION_DENIED error path used {@code destBucket + "/"} which prints the
 *   OzoneBucket object's toString(), not the bucket name; now uses getName().
 * - Cleanup used to close the source stream first without protection, so a
 *   failing source close leaked the destination stream; closes are now nested.
 * - Corrected garbled "to it self itself" wording in the self-copy error.
 *
 * @param copyHeader         raw x-amz-copy-source header, "bucket/key"
 * @param destBucket         destination bucket handle
 * @param destkey            destination key name
 * @param replicationConfig  replication to apply to the destination key
 * @param storageTypeDefault true when the request did not override storage
 *                           class; a self-copy is then illegal per S3 semantics
 * @return response carrying a fresh ETag and the destination's mtime
 * @throws OS3Exception mapped S3 error (NoSuchKey, NoSuchBucket, AccessDenied,
 *                      InvalidRequest for a no-op self-copy)
 * @throws IOException  on stream copy/close failures
 */
private CopyObjectResponse copyObject(String copyHeader, OzoneBucket destBucket, String destkey, ReplicationConfig replicationConfig, boolean storageTypeDefault) throws OS3Exception, IOException {
    Pair<String, String> result = parseSourceHeader(copyHeader);
    String sourceBucket = result.getLeft();
    String sourceKey = result.getRight();
    OzoneInputStream sourceInputStream = null;
    OzoneOutputStream destOutputStream = null;
    boolean closed = false;
    try {
        if (sourceBucket.equals(destBucket.getName()) && sourceKey.equals(destkey)) {
            // Source and dest are the same object.
            if (storageTypeDefault) {
                OS3Exception ex = newError(S3ErrorTable.INVALID_REQUEST, copyHeader);
                ex.setErrorMessage("This copy request is illegal because it is " + "trying to copy an object to itself without changing " + "the object's metadata, storage class, website redirect " + "location or encryption attributes.");
                throw ex;
            } else {
                // TODO: Actually here we should change storage type, as ozone
                // still does not support this just returning dummy response
                // for now
                CopyObjectResponse copyObjectResponse = new CopyObjectResponse();
                copyObjectResponse.setETag(OzoneUtils.getRequestID());
                copyObjectResponse.setLastModified(Instant.ofEpochMilli(Time.now()));
                return copyObjectResponse;
            }
        }
        OzoneBucket sourceOzoneBucket = getBucket(sourceBucket);
        OzoneBucket destOzoneBucket = destBucket;
        // Size the destination allocation from the source key's length.
        OzoneKeyDetails sourceKeyDetails = sourceOzoneBucket.getKey(sourceKey);
        long sourceKeyLen = sourceKeyDetails.getDataSize();
        sourceInputStream = sourceOzoneBucket.readKey(sourceKey);
        destOutputStream = destOzoneBucket.createKey(destkey, sourceKeyLen, replicationConfig, new HashMap<>());
        IOUtils.copy(sourceInputStream, destOutputStream);
        // Closing here, as if we don't call close this key will not commit in
        // OM, and getKey fails.
        sourceInputStream.close();
        destOutputStream.close();
        closed = true;
        OzoneKeyDetails destKeyDetails = destOzoneBucket.getKey(destkey);
        getMetrics().incCopyObjectSuccess();
        CopyObjectResponse copyObjectResponse = new CopyObjectResponse();
        copyObjectResponse.setETag(OzoneUtils.getRequestID());
        copyObjectResponse.setLastModified(destKeyDetails.getModificationTime());
        return copyObjectResponse;
    } catch (OMException ex) {
        if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
            throw newError(S3ErrorTable.NO_SUCH_KEY, sourceKey, ex);
        } else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
            throw newError(S3ErrorTable.NO_SUCH_BUCKET, sourceBucket, ex);
        } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            // Use the bucket NAME, not the OzoneBucket object's toString().
            throw newError(S3ErrorTable.ACCESS_DENIED, destBucket.getName() + "/" + destkey, ex);
        }
        throw ex;
    } finally {
        if (!closed) {
            // Nested so a failing source close cannot leak the dest stream.
            try {
                if (sourceInputStream != null) {
                    sourceInputStream.close();
                }
            } finally {
                if (destOutputStream != null) {
                    destOutputStream.close();
                }
            }
        }
    }
}
Also used : OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OS3Exception(org.apache.hadoop.ozone.s3.exception.OS3Exception) OMException(org.apache.hadoop.ozone.om.exceptions.OMException)

Example 15 with OzoneKeyDetails

use of org.apache.hadoop.ozone.client.OzoneKeyDetails in project ozone by apache.

The class ObjectEndpoint, method copyObject (ReplicationType/ReplicationFactor overload).

/**
 * Implements the S3 CopyObject operation for a destination bucket given by
 * name: streams the key named by the copy-source header into {@code destkey}
 * with the requested replication type and factor.
 *
 * @param copyHeader         raw x-amz-copy-source header, "bucket/key"
 * @param destBucket         destination bucket name
 * @param destkey            destination key name
 * @param replicationType    replication type for the destination key
 * @param replicationFactor  replication factor for the destination key
 * @param storageTypeDefault true when no storage class override was requested;
 *                           a self-copy is then rejected as InvalidRequest
 * @return response carrying a fresh ETag and the destination's mtime
 * @throws OS3Exception mapped S3 error (NoSuchKey, NoSuchBucket, AccessDenied)
 * @throws IOException  on stream copy/close failures
 */
private CopyObjectResponse copyObject(String copyHeader, String destBucket, String destkey, ReplicationType replicationType, ReplicationFactor replicationFactor, boolean storageTypeDefault) throws OS3Exception, IOException {
    Pair<String, String> source = parseSourceHeader(copyHeader);
    String sourceBucket = source.getLeft();
    String sourceKey = source.getRight();
    OzoneInputStream src = null;
    OzoneOutputStream dst = null;
    boolean streamsCommitted = false;
    try {
        boolean sameObject = sourceBucket.equals(destBucket) && sourceKey.equals(destkey);
        if (sameObject) {
            if (!storageTypeDefault) {
                // TODO: Actually here we should change storage type, as ozone
                // still does not support this just returning dummy response
                // for now
                CopyObjectResponse response = new CopyObjectResponse();
                response.setETag(OzoneUtils.getRequestID());
                response.setLastModified(Instant.ofEpochMilli(Time.now()));
                return response;
            }
            // A self-copy without a storage-class change is a no-op and is
            // rejected, matching S3 semantics.
            OS3Exception ex = newError(S3ErrorTable.INVALID_REQUEST, copyHeader);
            ex.setErrorMessage("This copy request is illegal because it is " + "trying to copy an object to it self itself without changing " + "the object's metadata, storage class, website redirect " + "location or encryption attributes.");
            throw ex;
        }
        OzoneBucket srcBucket = getBucket(sourceBucket);
        OzoneBucket dstBucket = getBucket(destBucket);
        // Size the destination allocation from the source key's length.
        long sourceKeyLen = srcBucket.getKey(sourceKey).getDataSize();
        src = srcBucket.readKey(sourceKey);
        dst = dstBucket.createKey(destkey, sourceKeyLen, replicationType, replicationFactor, new HashMap<>());
        IOUtils.copy(src, dst);
        // Close both streams now: the destination key does not commit in OM
        // until the output stream is closed, and getKey below would fail.
        src.close();
        dst.close();
        streamsCommitted = true;
        OzoneKeyDetails destKeyDetails = dstBucket.getKey(destkey);
        CopyObjectResponse response = new CopyObjectResponse();
        response.setETag(OzoneUtils.getRequestID());
        response.setLastModified(destKeyDetails.getModificationTime());
        return response;
    } catch (OMException ex) {
        // Translate OM failures into their S3 equivalents.
        if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
            throw newError(S3ErrorTable.NO_SUCH_KEY, sourceKey, ex);
        }
        if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
            throw newError(S3ErrorTable.NO_SUCH_BUCKET, sourceBucket, ex);
        }
        if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, destBucket + "/" + destkey, ex);
        }
        throw ex;
    } finally {
        // Best-effort cleanup on the failure path only.
        if (!streamsCommitted) {
            if (src != null) {
                src.close();
            }
            if (dst != null) {
                dst.close();
            }
        }
    }
}
Also used : OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OS3Exception(org.apache.hadoop.ozone.s3.exception.OS3Exception) OMException(org.apache.hadoop.ozone.om.exceptions.OMException)

Aggregations

OzoneKeyDetails (org.apache.hadoop.ozone.client.OzoneKeyDetails)33 OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket)21 OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream)19 OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume)15 OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream)13 Test (org.junit.Test)13 IOException (java.io.IOException)10 HashMap (java.util.HashMap)9 LinkedHashMap (java.util.LinkedHashMap)6 OzoneKey (org.apache.hadoop.ozone.client.OzoneKey)6 Test (org.junit.jupiter.api.Test)6 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)6 HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService)5 OFSPath (org.apache.hadoop.ozone.OFSPath)5 BucketArgs (org.apache.hadoop.ozone.client.BucketArgs)5 ArrayList (java.util.ArrayList)4 Path (org.apache.hadoop.fs.Path)4 Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline)4 OzoneKeyLocation (org.apache.hadoop.ozone.client.OzoneKeyLocation)4 Container (org.apache.hadoop.ozone.container.common.interfaces.Container)4