Example 1 with ObjectStore

Use of org.apache.hadoop.ozone.client.ObjectStore in project ozone by apache.

From the class TestStorageContainerManagerHA, method testPutKey:

public void testPutKey() throws Exception {
    String volumeName = UUID.randomUUID().toString();
    String bucketName = UUID.randomUUID().toString();
    Instant testStartTime = Instant.now();
    ObjectStore store = OzoneClientFactory.getRpcClient(cluster.getConf()).getObjectStore();
    String value = "sample value";
    store.createVolume(volumeName);
    OzoneVolume volume = store.getVolume(volumeName);
    volume.createBucket(bucketName);
    OzoneBucket bucket = volume.getBucket(bucketName);
    String keyName = UUID.randomUUID().toString();
    OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
    out.write(value.getBytes(UTF_8));
    out.close();
    OzoneKey key = bucket.getKey(keyName);
    Assert.assertEquals(keyName, key.getName());
    OzoneInputStream is = bucket.readKey(keyName);
    byte[] fileContent = new byte[value.getBytes(UTF_8).length];
    is.read(fileContent);
    Assert.assertEquals(value, new String(fileContent, UTF_8));
    Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
    Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
    is.close();
    final OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setReplicationConfig(
            RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
        .setKeyName(keyName)
        .setRefreshPipeline(true)
        .build();
    final OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
    final List<OmKeyLocationInfo> keyLocationInfos = keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
    long index = -1;
    for (StorageContainerManager scm : cluster.getStorageContainerManagers()) {
        if (scm.checkLeader()) {
            index = getLastAppliedIndex(scm);
        }
    }
    Assert.assertFalse(index == -1);
    long finalIndex = index;
    // Ensure all follower SCMs have caught up with the leader
    GenericTestUtils.waitFor(() -> areAllScmInSync(finalIndex), 100, 10000);
    final long containerID = keyLocationInfos.get(0).getContainerID();
    for (int k = 0; k < numOfSCMs; k++) {
        StorageContainerManager scm = cluster.getStorageContainerManagers().get(k);
        // Take a Ratis snapshot on each SCM to flush its in-memory state to the DB
        ((SCMRatisServerImpl) scm.getScmHAManager().getRatisServer()).getStateMachine().takeSnapshot();
        Assert.assertTrue(scm.getContainerManager().containerExist(ContainerID.valueOf(containerID)));
        Assert.assertNotNull(scm.getScmMetadataStore().getContainerTable().get(ContainerID.valueOf(containerID)));
    }
}
Also used: OzoneInputStream (org.apache.hadoop.ozone.client.io.OzoneInputStream), ObjectStore (org.apache.hadoop.ozone.client.ObjectStore), StorageContainerManager (org.apache.hadoop.hdds.scm.server.StorageContainerManager), Instant (java.time.Instant), OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream), OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs), OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo), OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume), OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket), OzoneKey (org.apache.hadoop.ozone.client.OzoneKey), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo)
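
For readers who only need the core ObjectStore write/read path, the SCM HA assertions can be stripped away. The following is a minimal standalone sketch distilled from the example above, not code from the project; it assumes the OzoneConfiguration resolves to a running cluster and that the RATIS/ONE constants in the test are the org.apache.hadoop.hdds.client ReplicationType/ReplicationFactor values.

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;

public final class ObjectStoreQuickstart {
    public static void main(String[] args) throws Exception {
        // Assumed to resolve to a running Ozone cluster (e.g. via ozone-site.xml on the classpath).
        OzoneConfiguration conf = new OzoneConfiguration();
        try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) {
            ObjectStore store = client.getObjectStore();
            store.createVolume("vol1");
            OzoneVolume volume = store.getVolume("vol1");
            volume.createBucket("bucket1");
            OzoneBucket bucket = volume.getBucket("bucket1");

            byte[] data = "sample value".getBytes(StandardCharsets.UTF_8);
            // createKey(name, size, replication type, replication factor, metadata), as in the test above.
            try (OzoneOutputStream out = bucket.createKey("key1", data.length,
                    ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>())) {
                out.write(data);
            }

            byte[] read = new byte[data.length];
            try (OzoneInputStream in = bucket.readKey("key1")) {
                int off = 0;
                while (off < read.length) {
                    // A single read() may return fewer bytes than requested, so loop until full.
                    int n = in.read(read, off, read.length - off);
                    if (n < 0) {
                        break;
                    }
                    off += n;
                }
            }
            System.out.println(new String(read, StandardCharsets.UTF_8));
        }
    }
}

The try-with-resources blocks close the streams and the client even if an I/O error occurs, which the test code above does manually with close().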

Example 2 with ObjectStore

Use of org.apache.hadoop.ozone.client.ObjectStore in project ozone by apache.

From the class TestOzoneManagerHAMetadataOnly, method testAllVolumeOperations:

@Test
public void testAllVolumeOperations() throws Exception {
    String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
    createAndCheckVolume(volumeName);
    ObjectStore objectStore = getObjectStore();
    objectStore.deleteVolume(volumeName);
    OzoneTestUtils.expectOmException(OMException.ResultCodes.VOLUME_NOT_FOUND, () -> objectStore.getVolume(volumeName));
    OzoneTestUtils.expectOmException(OMException.ResultCodes.VOLUME_NOT_FOUND, () -> objectStore.deleteVolume(volumeName));
}
Also used: ObjectStore (org.apache.hadoop.ozone.client.ObjectStore), Test (org.junit.Test)
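
OzoneTestUtils.expectOmException is essentially a try/catch around the OM call. A hedged sketch of the same check without the helper is shown below; it assumes OMException#getResult() exposes the result code, consistent with the OMException entry in the aggregations at the end of this page.

    // Sketch: assert VOLUME_NOT_FOUND without the OzoneTestUtils helper (assumption, not project code).
    try {
        objectStore.getVolume(volumeName);
        Assert.fail("Expected VOLUME_NOT_FOUND for a deleted volume");
    } catch (OMException e) {
        Assert.assertEquals(OMException.ResultCodes.VOLUME_NOT_FOUND, e.getResult());
    }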

Example 3 with ObjectStore

Use of org.apache.hadoop.ozone.client.ObjectStore in project ozone by apache.

From the class TestOzoneManagerHAMetadataOnly, method testOMProxyProviderFailoverToCurrentLeader:

/**
 * Test OMFailoverProxyProvider failover when current OM proxy is not
 * the current OM Leader.
 */
@Test
public void testOMProxyProviderFailoverToCurrentLeader() throws Exception {
    ObjectStore objectStore = getObjectStore();
    OMFailoverProxyProvider omFailoverProxyProvider = OmFailoverProxyUtil.getFailoverProxyProvider(objectStore.getClientProxy());
    // Run a couple of createVolume requests so the proxy provider discovers the current leader OM
    createVolumeTest(true);
    createVolumeTest(true);
    // The OMFailoverProxyProvider will point to the current leader OM node.
    String leaderOMNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId();
    // Perform a manual failover of the proxy provider to move the
    // currentProxyIndex to a node other than the leader OM.
    omFailoverProxyProvider.performFailoverToNextProxy();
    String newProxyNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId();
    Assert.assertNotEquals(leaderOMNodeId, newProxyNodeId);
    // Once another request is sent to this new proxy node, the response
    // carries the current leader information and the provider fails over
    // back to the leader proxy node.
    createVolumeTest(true);
    Thread.sleep(2000);
    String newLeaderOMNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId();
    // The old and new Leader OM NodeId must match since there was no new
    // election in the Ratis ring.
    Assert.assertEquals(leaderOMNodeId, newLeaderOMNodeId);
}
Also used: ObjectStore (org.apache.hadoop.ozone.client.ObjectStore), OMFailoverProxyProvider (org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider), Test (org.junit.Test)
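
The fixed Thread.sleep(2000) gives the proxy provider time to fail back over, but it is timing-sensitive. A hedged alternative, reusing the GenericTestUtils.waitFor polling helper that these tests already use elsewhere, could look like this:

    // Sketch: poll for the fail-back instead of sleeping a fixed two seconds (assumption, not project code).
    createVolumeTest(true);
    GenericTestUtils.waitFor(
        () -> leaderOMNodeId.equals(omFailoverProxyProvider.getCurrentProxyOMNodeId()),
        100, 10000);
    Assert.assertEquals(leaderOMNodeId, omFailoverProxyProvider.getCurrentProxyOMNodeId());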

Example 4 with ObjectStore

Use of org.apache.hadoop.ozone.client.ObjectStore in project ozone by apache.

From the class TestOzoneManagerHAMetadataOnly, method testOMRetryCache:

@Test
public void testOMRetryCache() throws Exception {
    ObjectStore objectStore = getObjectStore();
    objectStore.createVolume(UUID.randomUUID().toString());
    OMFailoverProxyProvider omFailoverProxyProvider = OmFailoverProxyUtil.getFailoverProxyProvider(objectStore.getClientProxy());
    String currentLeaderNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId();
    OzoneManagerRatisServer ozoneManagerRatisServer = getCluster().getOzoneManager(currentLeaderNodeId).getOmRatisServer();
    RaftServer raftServer = ozoneManagerRatisServer.getServer();
    ClientId clientId = ClientId.randomId();
    long callId = 2000L;
    String userName = UserGroupInformation.getCurrentUser().getUserName();
    String volumeName = UUID.randomUUID().toString();
    GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer.captureLogs(OMVolumeCreateRequest.getLogger());
    OMRequest omRequest = OMRequest.newBuilder()
        .setCreateVolumeRequest(CreateVolumeRequest.newBuilder()
            .setVolumeInfo(VolumeInfo.newBuilder()
                .setOwnerName(userName)
                .setAdminName(userName)
                .setVolume(volumeName)
                .build())
            .build())
        .setClientId(UUID.randomUUID().toString())
        .setCmdType(OzoneManagerProtocolProtos.Type.CreateVolume)
        .build();
    RaftClientReply raftClientReply = raftServer.submitClientRequest(
        RaftClientRequest.newBuilder()
            .setClientId(clientId)
            .setServerId(raftServer.getId())
            .setGroupId(ozoneManagerRatisServer.getRaftGroup().getGroupId())
            .setCallId(callId)
            .setMessage(Message.valueOf(
                OMRatisHelper.convertRequestToByteString(omRequest)))
            .setType(RaftClientRequest.writeRequestType())
            .build());
    Assert.assertTrue(raftClientReply.isSuccess());
    Assert.assertTrue(logCapturer.getOutput().contains("created volume:" + volumeName));
    logCapturer.clearOutput();
    raftClientReply = raftServer.submitClientRequest(
        RaftClientRequest.newBuilder()
            .setClientId(clientId)
            .setServerId(raftServer.getId())
            .setGroupId(ozoneManagerRatisServer.getRaftGroup().getGroupId())
            .setCallId(callId)
            .setMessage(Message.valueOf(
                OMRatisHelper.convertRequestToByteString(omRequest)))
            .setType(RaftClientRequest.writeRequestType())
            .build());
    Assert.assertTrue(raftClientReply.isSuccess());
    // This is the second request with the same client id and call id, so it
    // should not be executed again; the Ratis server should answer it from
    // its retry cache. If it had been executed a second time, the volume
    // creation would have failed, so verify no such failure was logged.
    Assert.assertFalse(logCapturer.getOutput().contains("Volume creation failed"));
    // Sleep slightly longer than the retry cache duration so the cache entry expires.
    Thread.sleep(getRetryCacheDuration().toMillis() + 5000);
    raftClientReply = raftServer.submitClientRequest(
        RaftClientRequest.newBuilder()
            .setClientId(clientId)
            .setServerId(raftServer.getId())
            .setGroupId(ozoneManagerRatisServer.getRaftGroup().getGroupId())
            .setCallId(callId)
            .setMessage(Message.valueOf(
                OMRatisHelper.convertRequestToByteString(omRequest)))
            .setType(RaftClientRequest.writeRequestType())
            .build());
    Assert.assertTrue(raftClientReply.isSuccess());
    // The client id and call id are still the same, but this request is sent
    // after the retry cache entry has expired, so the Ratis server executes
    // it again and volume creation fails because the volume already exists.
    Assert.assertTrue(logCapturer.getOutput().contains("Volume creation failed"));
}
Also used: OMRequest (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest), ObjectStore (org.apache.hadoop.ozone.client.ObjectStore), OMFailoverProxyProvider (org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider), RaftClientReply (org.apache.ratis.protocol.RaftClientReply), RaftServer (org.apache.ratis.server.RaftServer), ClientId (org.apache.ratis.protocol.ClientId), GenericTestUtils (org.apache.ozone.test.GenericTestUtils), OzoneManagerRatisServer (org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer), Test (org.junit.Test)
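
The comments above describe the behaviour being exercised: Ratis keeps a retry cache keyed on (client id, call id), so a duplicate submission is answered from the cache instead of being re-applied, until the entry expires. As a purely conceptual sketch of that idea (this is not Ratis's actual implementation), such a cache could look like:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

/** Conceptual sketch only; the real Ratis retry cache is considerably more involved. */
final class RetryCacheSketch<R> {

    private static final class Entry<R> {
        final R reply;
        final long expiryMillis;
        Entry(R reply, long expiryMillis) {
            this.reply = reply;
            this.expiryMillis = expiryMillis;
        }
    }

    private final Map<String, Entry<R>> cache = new ConcurrentHashMap<>();
    private final long ttlMillis;

    RetryCacheSketch(long ttlMillis) {
        this.ttlMillis = ttlMillis;
    }

    /** Duplicate (clientId, callId) pairs get the cached reply; expired entries are applied again. */
    R submit(String clientId, long callId, Supplier<R> apply) {
        String key = clientId + "#" + callId;
        long now = System.currentTimeMillis();
        Entry<R> cached = cache.get(key);
        if (cached != null && cached.expiryMillis > now) {
            return cached.reply;   // duplicate within TTL: not executed again
        }
        R reply = apply.get();     // first submission, or entry expired: execute
        cache.put(key, new Entry<>(reply, now + ttlMillis));
        return reply;
    }
}

This mirrors the three submissions in the test: the first executes, the second (same client id and call id, within the TTL) is served from the cache, and the third, sent after expiry, is executed again and fails because the volume already exists.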

Example 5 with ObjectStore

Use of org.apache.hadoop.ozone.client.ObjectStore in project ozone by apache.

From the class TestOzoneManagerListVolumes, method setupClass:

/**
 * Create a MiniOzoneCluster for testing.
 */
@BeforeClass
public static void setupClass() throws InterruptedException, TimeoutException, IOException {
    OzoneConfiguration conf = new OzoneConfiguration();
    UserGroupInformation.setLoginUser(adminUser);
    String clusterId = UUID.randomUUID().toString();
    String scmId = UUID.randomUUID().toString();
    String omId = UUID.randomUUID().toString();
    conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
    conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10);
    // Use the native authorizer implementation here; the default one doesn't do actual ACL checks
    conf.set(OZONE_ACL_AUTHORIZER_CLASS, OZONE_ACL_AUTHORIZER_CLASS_NATIVE);
    cluster = MiniOzoneCluster.newBuilder(conf)
        .withoutDatanodes()
        .setClusterId(clusterId)
        .setScmId(scmId)
        .setOmId(omId)
        .build();
    cluster.waitForClusterToBeReady();
    // Create volumes with non-default owners and ACLs
    OzoneClient client = cluster.getClient();
    ObjectStore objectStore = client.getObjectStore();
    /* r = READ, w = WRITE, c = CREATE, d = DELETE
       l = LIST, a = ALL, n = NONE, x = READ_ACL, y = WRITE_ACL */
    String aclUser1All = "user:user1:a";
    String aclUser2All = "user:user2:a";
    String aclWorldAll = "world::a";
    createVolumeWithOwnerAndAcl(objectStore, "volume1", "user1", aclUser1All);
    createVolumeWithOwnerAndAcl(objectStore, "volume2", "user2", aclUser2All);
    createVolumeWithOwnerAndAcl(objectStore, "volume3", "user1", aclUser2All);
    createVolumeWithOwnerAndAcl(objectStore, "volume4", "user2", aclUser1All);
    createVolumeWithOwnerAndAcl(objectStore, "volume5", "user1", aclWorldAll);
    OzoneManager om = cluster.getOzoneManager();
    om.stop();
    om.join();
}
Also used: ObjectStore (org.apache.hadoop.ozone.client.ObjectStore), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), OzoneClient (org.apache.hadoop.ozone.client.OzoneClient), BeforeClass (org.junit.BeforeClass)
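
The createVolumeWithOwnerAndAcl helper itself is not shown in this snippet. As a rough, hypothetical sketch of the owner part only (the ACL string, e.g. "user:user1:a", would still need to be parsed and applied, which is elided here), a volume can be created with an explicit owner through VolumeArgs; this is an assumption about how such a helper could look, not the test's actual code.

    // Hypothetical sketch, NOT the test's createVolumeWithOwnerAndAcl helper:
    // create a volume owned by a specific user; ACL handling is intentionally omitted.
    private static void createVolumeWithOwner(ObjectStore store, String volumeName,
        String owner) throws IOException {
        VolumeArgs volumeArgs = VolumeArgs.newBuilder()
            .setOwner(owner)
            .setAdmin(owner)
            .build();
        store.createVolume(volumeName, volumeArgs);
    }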

Aggregations

ObjectStore (org.apache.hadoop.ozone.client.ObjectStore): 56 usages
Test (org.junit.Test): 37 usages
OzoneVolume (org.apache.hadoop.ozone.client.OzoneVolume): 34 usages
OzoneClient (org.apache.hadoop.ozone.client.OzoneClient): 31 usages
OzoneBucket (org.apache.hadoop.ozone.client.OzoneBucket): 24 usages
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream): 12 usages
OMException (org.apache.hadoop.ozone.om.exceptions.OMException): 9 usages
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 8 usages
IOException (java.io.IOException): 6 usages
OzoneAcl (org.apache.hadoop.ozone.OzoneAcl): 6 usages
BucketArgs (org.apache.hadoop.ozone.client.BucketArgs): 5 usages
OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs): 5 usages
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo): 5 usages
VolumeArgs (org.apache.hadoop.ozone.client.VolumeArgs): 4 usages
OMFailoverProxyProvider (org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider): 4 usages
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo): 4 usages
Before (org.junit.Before): 4 usages
HashMap (java.util.HashMap): 3 usages
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 3 usages
ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo): 3 usages