Search in sources :

Example 11 with OzoneKey

use of org.apache.hadoop.ozone.client.OzoneKey in project ozone by apache.

The following example is from the class BucketEndpoint, method get.

/**
 * Rest endpoint to list objects in a specific bucket (S3 GET Bucket /
 * ListObjects). Supports both v1 (marker/next-marker) and v2
 * (continuation-token/start-after) pagination, plus the {@code ?acl} and
 * {@code ?uploads} sub-resources.
 * <p>
 * See: https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html
 * for more details.
 *
 * @param bucketName    name of the bucket to list
 * @param delimiter     keys sharing a prefix up to the first delimiter are
 *                      collapsed into CommonPrefixes entries (may be null)
 * @param encodingType  requested encoding of keys in the response (unused
 *                      here; a fixed ENCODING_TYPE is set on the response)
 * @param marker        v1 pagination cursor; mapped onto startAfter below
 * @param maxKeys       maximum number of keys plus prefixes to return
 * @param prefix        only keys starting with this prefix are listed
 * @param continueToken v2 pagination cursor (encodes last key and last dir)
 * @param startAfter    v2 "start listing after this key" cursor
 * @param uploads       if present, lists in-progress multipart uploads instead
 * @param aclMarker     if present, returns the bucket ACL instead of a listing
 * @param hh            request headers (part of the JAX-RS mapping)
 * @return the listing, ACL, or multipart-upload response as XML
 * @throws OS3Exception on S3-mapped errors (e.g. ACCESS_DENIED)
 * @throws IOException  on underlying Ozone failures
 */
@GET
@SuppressFBWarnings
@SuppressWarnings({ "parameternumber", "methodlength" })
public Response get(@PathParam("bucket") String bucketName, @QueryParam("delimiter") String delimiter, @QueryParam("encoding-type") String encodingType, @QueryParam("marker") String marker, @DefaultValue("1000") @QueryParam("max-keys") int maxKeys, @QueryParam("prefix") String prefix, @QueryParam("continuation-token") String continueToken, @QueryParam("start-after") String startAfter, @QueryParam("uploads") String uploads, @QueryParam("acl") String aclMarker, @Context HttpHeaders hh) throws OS3Exception, IOException {
    S3GAction s3GAction = S3GAction.GET_BUCKET;
    Iterator<? extends OzoneKey> ozoneKeyIterator;
    // Decoded eagerly; only dereferenced below when continueToken != null.
    // NOTE(review): presumably decodeFromString(null) is a safe no-op — confirm.
    ContinueToken decodedToken = ContinueToken.decodeFromString(continueToken);
    try {
        // ?acl sub-resource: return the bucket ACL and skip the listing.
        if (aclMarker != null) {
            s3GAction = S3GAction.GET_ACL;
            S3BucketAcl result = getAcl(bucketName);
            getMetrics().incGetAclSuccess();
            AUDIT.logReadSuccess(buildAuditMessageForSuccess(s3GAction, getAuditParameters()));
            return Response.ok(result, MediaType.APPLICATION_XML_TYPE).build();
        }
        // ?uploads sub-resource: list in-progress multipart uploads instead.
        if (uploads != null) {
            s3GAction = S3GAction.LIST_MULTIPART_UPLOAD;
            return listMultipartUploads(bucketName, prefix);
        }
        if (prefix == null) {
            prefix = "";
        }
        // Assign marker to startAfter. for the compatibility of aws api v1
        if (startAfter == null && marker != null) {
            startAfter = marker;
        }
        OzoneBucket bucket = getBucket(bucketName);
        // Pick the listing start position: a continuation token always wins
        // over start-after, per the S3 ListObjectsV2 contract.
        if (startAfter != null && continueToken != null) {
            // If continuation token and start after both are provided, then we
            // ignore start After
            ozoneKeyIterator = bucket.listKeys(prefix, decodedToken.getLastKey());
        } else if (startAfter != null && continueToken == null) {
            ozoneKeyIterator = bucket.listKeys(prefix, startAfter);
        } else if (startAfter == null && continueToken != null) {
            ozoneKeyIterator = bucket.listKeys(prefix, decodedToken.getLastKey());
        } else {
            ozoneKeyIterator = bucket.listKeys(prefix);
        }
    } catch (OMException ex) {
        AUDIT.logReadFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        getMetrics().incGetBucketFailure();
        // Only PERMISSION_DENIED is translated to the S3 error model; other
        // OM results propagate unchanged.
        if (ex.getResult() == ResultCodes.PERMISSION_DENIED) {
            throw newError(S3ErrorTable.ACCESS_DENIED, bucketName, ex);
        } else {
            throw ex;
        }
    } catch (Exception ex) {
        getMetrics().incGetBucketFailure();
        AUDIT.logReadFailure(buildAuditMessageForFailure(s3GAction, getAuditParameters(), ex));
        throw ex;
    }
    ListObjectResponse response = new ListObjectResponse();
    response.setDelimiter(delimiter);
    response.setName(bucketName);
    response.setPrefix(prefix);
    response.setMarker(marker == null ? "" : marker);
    response.setMaxKeys(maxKeys);
    response.setEncodingType(ENCODING_TYPE);
    response.setTruncated(false);
    response.setContinueToken(continueToken);
    // prevDir tracks the last CommonPrefixes entry emitted, so a directory
    // spanning a page boundary is not emitted twice on the next page.
    String prevDir = null;
    if (continueToken != null) {
        prevDir = decodedToken.getLastDir();
    }
    String lastKey = null;
    int count = 0;
    // Walk keys under the prefix, grouping by delimiter, until maxKeys
    // entries (keys + common prefixes combined) have been collected.
    while (ozoneKeyIterator.hasNext()) {
        OzoneKey next = ozoneKeyIterator.next();
        String relativeKeyName = next.getName().substring(prefix.length());
        // NOTE(review): countMatches is evaluated before the delimiter
        // null-check below; presumably it returns 0 for a null delimiter
        // (commons-lang behavior) — confirm.
        int depth = StringUtils.countMatches(relativeKeyName, delimiter);
        if (delimiter != null) {
            if (depth > 0) {
                // means key has multiple delimiters in its value.
                // ex: dir/dir1/dir2, where delimiter is "/" and prefix is dir/
                String dirName = relativeKeyName.substring(0, relativeKeyName.indexOf(delimiter));
                if (!dirName.equals(prevDir)) {
                    response.addPrefix(prefix + dirName + delimiter);
                    prevDir = dirName;
                    count++;
                }
            } else if (relativeKeyName.endsWith(delimiter)) {
                // means or key is same as prefix with delimiter at end and ends with
                // delimiter. ex: dir/, where prefix is dir and delimiter is /
                response.addPrefix(relativeKeyName);
                count++;
            } else {
                // means our key is matched with prefix if prefix is given and it
                // does not have any common prefix.
                addKey(response, next);
                count++;
            }
        } else {
            addKey(response, next);
            count++;
        }
        // Page is full: remember the last key so the continuation token /
        // next-marker can resume from here.
        if (count == maxKeys) {
            lastKey = next.getName();
            break;
        }
    }
    response.setKeyCount(count);
    // Truncation: only report truncated=true if the iterator still has more
    // entries beyond the full page just collected.
    if (count < maxKeys) {
        response.setTruncated(false);
    } else if (ozoneKeyIterator.hasNext()) {
        response.setTruncated(true);
        ContinueToken nextToken = new ContinueToken(lastKey, prevDir);
        response.setNextToken(nextToken.encodeToString());
        // Set nextMarker to be lastKey. for the compatibility of aws api v1
        response.setNextMarker(lastKey);
    } else {
        response.setTruncated(false);
    }
    AUDIT.logReadSuccess(buildAuditMessageForSuccess(s3GAction, getAuditParameters()));
    getMetrics().incGetBucketSuccess();
    // NOTE(review): this overwrites the keyCount set above with a value
    // recomputed from the response contents; the two are expected to agree —
    // confirm before removing either.
    response.setKeyCount(response.getCommonPrefixes().size() + response.getContents().size());
    return Response.ok(response).build();
}
Also used : OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) ContinueToken(org.apache.hadoop.ozone.s3.util.ContinueToken) S3GAction(org.apache.hadoop.ozone.audit.S3GAction) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) OS3Exception(org.apache.hadoop.ozone.s3.exception.OS3Exception) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) IOException(java.io.IOException) GET(javax.ws.rs.GET) SuppressFBWarnings(edu.umd.cs.findbugs.annotations.SuppressFBWarnings)

Example 12 with OzoneKey

use of org.apache.hadoop.ozone.client.OzoneKey in project ozone by apache.

The following example is from the class TestOzoneRpcClientAbstract, method testZReadKeyWithUnhealthyContainerReplica.

// Make this executed at last, for it has some side effect to other UTs
/**
 * Writes two RATIS/THREE keys, marks every replica of the second key's
 * container UNHEALTHY (and rolls back the BCSID on two replicas), restarts
 * the involved datanodes, waits for SCM to move the container to CLOSING,
 * and verifies the key is still readable from the remaining replica.
 */
@Test
@Flaky("HDDS-6151")
public void testZReadKeyWithUnhealthyContainerReplica() throws Exception {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName1 = UUID.randomUUID().toString();
    // Write first key
    OzoneOutputStream out = bucket.createKey(keyName1, value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    // Write second key
    String keyName2 = UUID.randomUUID().toString();
    value = "unhealthy container replica";
    out = bucket.createKey(keyName2, value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    // Find container ID
    OzoneKey key = bucket.getKey(keyName2);
    long containerID = ((OzoneKeyDetails) key).getOzoneKeyLocations().get(0).getContainerID();
    // Set container replica to UNHEALTHY
    Container container;
    int index = 1;
    List<HddsDatanodeService> involvedDNs = new ArrayList<>();
    for (HddsDatanodeService hddsDatanode : cluster.getHddsDatanodes()) {
        container = hddsDatanode.getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerID);
        if (container == null) {
            continue;
        }
        container.markContainerUnhealthy();
        // Change first and second replica commit sequenceId
        if (index < 3) {
            long newBCSID = container.getBlockCommitSequenceId() - 1;
            KeyValueContainerData cData = (KeyValueContainerData) container.getContainerData();
            try (DBHandle db = BlockUtils.getDB(cData, cluster.getConf())) {
                db.getStore().getMetadataTable().put(cData.bcsIdKey(), newBCSID);
            }
            container.updateBlockCommitSequenceId(newBCSID);
            index++;
        }
        involvedDNs.add(hddsDatanode);
    }
    // Restart DNs; only wait for the last one to re-register.
    int dnCount = involvedDNs.size();
    for (index = 0; index < dnCount; index++) {
        if (index == dnCount - 1) {
            cluster.restartHddsDatanode(involvedDNs.get(index).getDatanodeDetails(), true);
        } else {
            cluster.restartHddsDatanode(involvedDNs.get(index).getDatanodeDetails(), false);
        }
    }
    StorageContainerManager scm = cluster.getStorageContainerManager();
    GenericTestUtils.waitFor(() -> {
        try {
            ContainerInfo containerInfo = scm.getContainerInfo(containerID);
            System.out.println("state " + containerInfo.getState());
            return containerInfo.getState() == HddsProtos.LifeCycleState.CLOSING;
        } catch (IOException e) {
            fail("Failed to get container info for " + e.getMessage());
            return false;
        }
    }, 1000, 10000);
    // Try reading keyName2
    try {
        GenericTestUtils.setLogLevel(XceiverClientGrpc.getLogger(), DEBUG);
        // FIX: close the stream (it was leaked) and use the number of bytes
        // actually read instead of ignoring read()'s return value and relying
        // on trim() to strip NUL padding from the oversized buffer.
        try (OzoneInputStream is = bucket.readKey(keyName2)) {
            byte[] content = new byte[100];
            int length = is.read(content);
            String retValue = new String(content, 0, Math.max(length, 0), UTF_8);
            Assert.assertEquals(value, retValue);
        }
    } catch (IOException e) {
        fail("Reading unhealthy replica should succeed.");
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) StorageContainerManager(org.apache.hadoop.hdds.scm.server.StorageContainerManager) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) HddsDatanodeService(org.apache.hadoop.ozone.HddsDatanodeService) IOException(java.io.IOException) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) OzoneKeyDetails(org.apache.hadoop.ozone.client.OzoneKeyDetails) DBHandle(org.apache.hadoop.ozone.container.common.interfaces.DBHandle) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) ContainerInfo(org.apache.hadoop.hdds.scm.container.ContainerInfo) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test) Flaky(org.apache.ozone.test.tag.Flaky)

Example 13 with OzoneKey

use of org.apache.hadoop.ozone.client.OzoneKey in project ozone by apache.

The following example is from the class TestOzoneRpcClientAbstract, method testPutKeyRatisThreeNodesParallel.

/**
 * Two threads each write and read back five 5 MB RATIS/THREE keys in
 * parallel, verifying name, content, replication, and timestamps.
 */
@Test
public void testPutKeyRatisThreeNodesParallel() throws IOException, InterruptedException {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    Instant testStartTime = Instant.now();
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    CountDownLatch latch = new CountDownLatch(2);
    AtomicInteger failCount = new AtomicInteger(0);
    Runnable r = () -> {
        try {
            for (int i = 0; i < 5; i++) {
                String keyName = UUID.randomUUID().toString();
                String data = Arrays.toString(generateData(5 * 1024 * 1024, (byte) RandomUtils.nextLong()));
                OzoneOutputStream out = bucket.createKey(keyName, data.getBytes(UTF_8).length, ReplicationType.RATIS, THREE, new HashMap<>());
                out.write(data.getBytes(UTF_8));
                out.close();
                OzoneKey key = bucket.getKey(keyName);
                Assert.assertEquals(keyName, key.getName());
                byte[] fileContent = new byte[data.getBytes(UTF_8).length];
                // FIX: a single read() on a multi-MB payload may return fewer
                // bytes than requested, making the content check flaky; read
                // until the buffer is full, and close the stream even if an
                // assertion fails.
                try (OzoneInputStream is = bucket.readKey(keyName)) {
                    int off = 0;
                    while (off < fileContent.length) {
                        int n = is.read(fileContent, off, fileContent.length - off);
                        if (n < 0) {
                            break;
                        }
                        off += n;
                    }
                }
                verifyReplication(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE));
                Assert.assertEquals(data, new String(fileContent, UTF_8));
                Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
                Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
            }
            latch.countDown();
        } catch (IOException ex) {
            latch.countDown();
            failCount.incrementAndGet();
        }
    };
    Thread thread1 = new Thread(r);
    Thread thread2 = new Thread(r);
    thread1.start();
    thread2.start();
    // FIX: await() returns false on timeout; previously a timeout was
    // silently ignored and the test could pass without both threads finishing.
    Assert.assertTrue("Writer threads did not finish within the timeout", latch.await(600, TimeUnit.SECONDS));
    if (failCount.get() > 0) {
        fail("testPutKeyRatisThreeNodesParallel failed");
    }
}
Also used : OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) Instant(java.time.Instant) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) IOException(java.io.IOException) CountDownLatch(java.util.concurrent.CountDownLatch) OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)

Example 14 with OzoneKey

use of org.apache.hadoop.ozone.client.OzoneKey in project ozone by apache.

The following example is from the class TestOzoneRpcClientAbstract, method testHeadObject.

/**
 * Verifies that headObject returns the key's metadata (names, replication,
 * size) without reading data, and that a non-existent key yields
 * KEY_NOT_FOUND.
 */
@Test
public void testHeadObject() throws IOException {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    ReplicationConfig replicationConfig = ReplicationConfig.fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName = UUID.randomUUID().toString();
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, replicationConfig, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    OzoneKey key = bucket.headObject(keyName);
    Assert.assertEquals(volumeName, key.getVolumeName());
    Assert.assertEquals(bucketName, key.getBucketName());
    Assert.assertEquals(keyName, key.getName());
    Assert.assertEquals(replicationConfig.getReplicationType(), key.getReplicationConfig().getReplicationType());
    Assert.assertEquals(replicationConfig.getRequiredNodes(), key.getReplicationConfig().getRequiredNodes());
    Assert.assertEquals(value.getBytes(UTF_8).length, key.getDataSize());
    try {
        bucket.headObject(UUID.randomUUID().toString());
        // FIX: previously the test passed silently if no exception was
        // thrown; the negative case must fail when headObject succeeds.
        fail("headObject of a non-existent key should throw KEY_NOT_FOUND");
    } catch (OMException ex) {
        Assert.assertEquals(ResultCodes.KEY_NOT_FOUND, ex.getResult());
    }
}
Also used : OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) DefaultReplicationConfig(org.apache.hadoop.hdds.client.DefaultReplicationConfig) ReplicationConfig(org.apache.hadoop.hdds.client.ReplicationConfig) RatisReplicationConfig(org.apache.hadoop.hdds.client.RatisReplicationConfig) ECReplicationConfig(org.apache.hadoop.hdds.client.ECReplicationConfig) StandaloneReplicationConfig(org.apache.hadoop.hdds.client.StandaloneReplicationConfig) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) OMException(org.apache.hadoop.ozone.om.exceptions.OMException) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)

Example 15 with OzoneKey

use of org.apache.hadoop.ozone.client.OzoneKey in project ozone by apache.

The following example is from the class TestOzoneRpcClientAbstract, method testPutKey.

/**
 * Writes ten small RATIS/ONE keys and reads each back, verifying name,
 * content, replication, and that timestamps are not before the test start.
 */
@Test
public void testPutKey() throws IOException {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    Instant testStartTime = Instant.now();
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    for (int i = 0; i < 10; i++) {
        String keyName = UUID.randomUUID().toString();
        OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
        out.write(value.getBytes(UTF_8));
        out.close();
        OzoneKey key = bucket.getKey(keyName);
        Assert.assertEquals(keyName, key.getName());
        byte[] fileContent = new byte[value.getBytes(UTF_8).length];
        // FIX: the input stream was never closed (leaked once per iteration)
        // and read()'s return value was ignored; read until the buffer is
        // full and close the stream via try-with-resources.
        try (OzoneInputStream is = bucket.readKey(keyName)) {
            int off = 0;
            while (off < fileContent.length) {
                int n = is.read(fileContent, off, fileContent.length - off);
                if (n < 0) {
                    break;
                }
                off += n;
            }
        }
        verifyReplication(volumeName, bucketName, keyName, RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE));
        Assert.assertEquals(value, new String(fileContent, UTF_8));
        Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
        Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
    }
}
Also used : OzoneVolume(org.apache.hadoop.ozone.client.OzoneVolume) OzoneBucket(org.apache.hadoop.ozone.client.OzoneBucket) OzoneInputStream(org.apache.hadoop.ozone.client.io.OzoneInputStream) Instant(java.time.Instant) OzoneKey(org.apache.hadoop.ozone.client.OzoneKey) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)

Aggregations

OzoneKey (org.apache.hadoop.ozone.client.OzoneKey)31 OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream)20 OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket)19 OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume)18 OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream)15 Test (org.junit.jupiter.api.Test)12 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)10 IOException (java.io.IOException)8 Instant (java.time.Instant)7 HddsDatanodeService (org.apache.hadoop.ozone.HddsDatanodeService)7 Container (org.apache.hadoop.ozone.container.common.interfaces.Container)6 OMException (org.apache.hadoop.ozone.om.exceptions.OMException)6 Test (org.junit.Test)6 OzoneKeyDetails (org.apache.hadoop.ozone.client.OzoneKeyDetails)5 HashMap (java.util.HashMap)4 LinkedList (java.util.LinkedList)4 File (java.io.File)3 ObjectStore (org.apache.hadoop.ozone.client.ObjectStore)3 OzoneContainer (org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer)3 ArrayList (java.util.ArrayList)2