
Example 26 with OmKeyInfo

use of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in project ozone by apache.

the class TestOMDBUpdatesHandler method testDelete.

@Test
public void testDelete() throws Exception {
    // Write 1 volume, 1 key into source and target OM DBs.
    String volumeKey = omMetadataManager.getVolumeKey("sampleVol");
    String nonExistVolumeKey = omMetadataManager.getVolumeKey("nonExistingVolume");
    OmVolumeArgs args = OmVolumeArgs.newBuilder().setVolume("sampleVol").setAdminName("bilbo").setOwnerName("bilbo").build();
    omMetadataManager.getVolumeTable().put(volumeKey, args);
    reconOmMetadataManager.getVolumeTable().put(volumeKey, args);
    OmKeyInfo omKeyInfo = getOmKeyInfo("sampleVol", "bucketOne", "key_one");
    omMetadataManager.getKeyTable(getBucketLayout()).put("/sampleVol/bucketOne/key_one", omKeyInfo);
    reconOmMetadataManager.getKeyTable(getBucketLayout()).put("/sampleVol/bucketOne/key_one", omKeyInfo);
    // Delete the volume and key from target DB.
    omMetadataManager.getKeyTable(getBucketLayout()).delete("/sampleVol/bucketOne/key_one");
    omMetadataManager.getVolumeTable().delete(volumeKey);
    // Delete a non-existing volume and key
    omMetadataManager.getKeyTable(getBucketLayout()).delete("/sampleVol/bucketOne/key_two");
    omMetadataManager.getVolumeTable().delete(nonExistVolumeKey);
    List<byte[]> writeBatches = getBytesFromOmMetaManager(3);
    OMDBUpdatesHandler omdbUpdatesHandler = captureEvents(writeBatches);
    List<OMDBUpdateEvent> events = omdbUpdatesHandler.getEvents();
    assertEquals(4, events.size());
    OMDBUpdateEvent keyEvent = events.get(0);
    assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, keyEvent.getAction());
    assertEquals("/sampleVol/bucketOne/key_one", keyEvent.getKey());
    assertEquals(omKeyInfo, keyEvent.getValue());
    OMDBUpdateEvent volEvent = events.get(1);
    assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, volEvent.getAction());
    assertEquals(volumeKey, volEvent.getKey());
    assertNotNull(volEvent.getValue());
    OmVolumeArgs volumeInfo = (OmVolumeArgs) volEvent.getValue();
    assertEquals("sampleVol", volumeInfo.getVolume());
    // Assert that the values of non-existent keys are null.
    OMDBUpdateEvent nonExistKey = events.get(2);
    assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, nonExistKey.getAction());
    assertEquals("/sampleVol/bucketOne/key_two", nonExistKey.getKey());
    assertNull(nonExistKey.getValue());
    OMDBUpdateEvent nonExistVolume = events.get(3);
    assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, nonExistVolume.getAction());
    assertEquals(nonExistVolumeKey, nonExistVolume.getKey());
    assertNull(nonExistVolume.getValue());
}
Also used : OmVolumeArgs(org.apache.hadoop.ozone.om.helpers.OmVolumeArgs) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) Test(org.junit.Test)
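
The getOmKeyInfo helper called above is not shown on this page. A minimal sketch of what such a helper typically builds, assuming the OmKeyInfo.Builder setters (setVolumeName, setBucketName, setKeyName, setDataSize, setReplicationConfig) and a one-replica standalone replication config, could look like this:

private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, String keyName) {
    // Build a bare-bones key record; the real test helper may also set the
    // data size, creation/modification times and block locations.
    return new OmKeyInfo.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setKeyName(keyName)
        .setReplicationConfig(StandaloneReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
        .setDataSize(0L)
        .build();
}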

Example 27 with OmKeyInfo

use of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in project ozone by apache.

the class TestOMDBUpdatesHandler method testOperateOnSameEntry.

@Test
public void testOperateOnSameEntry() throws Exception {
    // Create 1 volume, 1 key and write to source OM DB.
    String volumeKey = omMetadataManager.getVolumeKey("sampleVol");
    OmVolumeArgs args = OmVolumeArgs.newBuilder().setVolume("sampleVol").setAdminName("bilbo").setOwnerName("bilbo").build();
    omMetadataManager.getVolumeTable().put(volumeKey, args);
    OmKeyInfo key = getOmKeyInfo("sampleVol", "bucketOne", "key");
    omMetadataManager.getKeyTable(getBucketLayout()).put("/sampleVol/bucketOne/key", key);
    // Overwrite the same key twice with new values, delete it twice, then put
    // it back; the handler should report UPDATE, DELETE and PUT events.
    OmKeyInfo keyNewValue = getOmKeyInfo("sampleVol", "bucketOne", "key_new");
    omMetadataManager.getKeyTable(getBucketLayout()).put("/sampleVol/bucketOne/key", keyNewValue);
    OmKeyInfo keyNewValue2 = getOmKeyInfo("sampleVol", "bucketOne", "key_new2");
    omMetadataManager.getKeyTable(getBucketLayout()).put("/sampleVol/bucketOne/key", keyNewValue2);
    omMetadataManager.getKeyTable(getBucketLayout()).delete("/sampleVol/bucketOne/key");
    omMetadataManager.getKeyTable(getBucketLayout()).delete("/sampleVol/bucketOne/key");
    omMetadataManager.getKeyTable(getBucketLayout()).put("/sampleVol/bucketOne/key", keyNewValue2);
    List<byte[]> writeBatches = getBytesFromOmMetaManager(0);
    OMDBUpdatesHandler omdbUpdatesHandler = captureEvents(writeBatches);
    List<OMDBUpdateEvent> events = omdbUpdatesHandler.getEvents();
    assertEquals(7, events.size());
    OMDBUpdateEvent volEvent = events.get(0);
    assertEquals(PUT, volEvent.getAction());
    assertEquals(volumeKey, volEvent.getKey());
    assertEquals(args.getVolume(), ((OmVolumeArgs) volEvent.getValue()).getVolume());
    OMDBUpdateEvent keyPutEvent = events.get(1);
    assertEquals(PUT, keyPutEvent.getAction());
    assertEquals("/sampleVol/bucketOne/key", keyPutEvent.getKey());
    assertEquals("key", ((OmKeyInfo) keyPutEvent.getValue()).getKeyName());
    assertNull(keyPutEvent.getOldValue());
    OMDBUpdateEvent keyUpdateEvent = events.get(2);
    assertEquals(UPDATE, keyUpdateEvent.getAction());
    assertEquals("/sampleVol/bucketOne/key", keyUpdateEvent.getKey());
    assertEquals("key_new", ((OmKeyInfo) keyUpdateEvent.getValue()).getKeyName());
    assertNotNull(keyUpdateEvent.getOldValue());
    assertEquals("key", ((OmKeyInfo) keyUpdateEvent.getOldValue()).getKeyName());
    OMDBUpdateEvent keyUpdateEvent2 = events.get(3);
    assertEquals(UPDATE, keyUpdateEvent2.getAction());
    assertEquals("/sampleVol/bucketOne/key", keyUpdateEvent2.getKey());
    assertEquals("key_new2", ((OmKeyInfo) keyUpdateEvent2.getValue()).getKeyName());
    assertNotNull(keyUpdateEvent2.getOldValue());
    assertEquals("key_new", ((OmKeyInfo) keyUpdateEvent2.getOldValue()).getKeyName());
    OMDBUpdateEvent keyDeleteEvent = events.get(4);
    assertEquals(DELETE, keyDeleteEvent.getAction());
    assertEquals("/sampleVol/bucketOne/key", keyDeleteEvent.getKey());
    assertEquals("key_new2", ((OmKeyInfo) keyDeleteEvent.getValue()).getKeyName());
    OMDBUpdateEvent keyDeleteEvent2 = events.get(5);
    assertEquals(DELETE, keyDeleteEvent2.getAction());
    assertEquals("/sampleVol/bucketOne/key", keyDeleteEvent2.getKey());
    assertEquals("key_new2", ((OmKeyInfo) keyDeleteEvent2.getValue()).getKeyName());
    OMDBUpdateEvent keyPut2 = events.get(6);
    assertEquals(PUT, keyPut2.getAction());
    assertEquals("/sampleVol/bucketOne/key", keyPut2.getKey());
    assertEquals("key_new2", ((OmKeyInfo) keyPut2.getValue()).getKeyName());
    assertNotNull(keyPut2.getOldValue());
    assertEquals("key_new2", ((OmKeyInfo) keyPut2.getOldValue()).getKeyName());
}
Also used : OmVolumeArgs(org.apache.hadoop.ozone.om.helpers.OmVolumeArgs) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) Test(org.junit.Test)
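
The getBytesFromOmMetaManager and captureEvents helpers used in these two tests are also defined outside the snippets. A plausible shape for them, assuming the DBStore getUpdatesSince API (returning a DBUpdatesWrapper of serialized write batches) and RocksDB's WriteBatch#iterate, is sketched below; treat the class names, signatures and the metadata manager passed to the handler as assumptions rather than the test's exact code:

private List<byte[]> getBytesFromOmMetaManager(long sequenceNumber) throws Exception {
    // Fetch the serialized write batches applied to the OM RocksDB after the
    // given sequence number.
    DBUpdatesWrapper dbUpdates = omMetadataManager.getStore().getUpdatesSince(sequenceNumber);
    return dbUpdates.getData();
}

private OMDBUpdatesHandler captureEvents(List<byte[]> writeBatches) throws Exception {
    // Which metadata manager the handler is constructed with is an assumption.
    OMDBUpdatesHandler handler = new OMDBUpdatesHandler(reconOmMetadataManager);
    for (byte[] data : writeBatches) {
        // Replay each batch through the handler so it can classify the
        // operations into PUT/UPDATE/DELETE events.
        try (WriteBatch writeBatch = new WriteBatch(data)) {
            writeBatch.iterate(handler);
        }
    }
    return handler;
}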

Example 28 with OmKeyInfo

use of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in project ozone by apache.

the class TestCloseContainerHandlingByClient method testBlockWritesCloseConsistency.

@Test
public void testBlockWritesCloseConsistency() throws Exception {
    String keyName = getKeyName();
    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
    // write more than one chunk's worth of data
    byte[] data = ContainerTestHelper.getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes(UTF_8);
    key.write(data);
    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
    // build the key args used to look up this key from OM after close
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE))
        .setKeyName(keyName)
        .setRefreshPipeline(true)
        .build();
    waitForContainerClose(key);
    key.close();
    // read the key from OM again and match the length. The length will still
    // be equal to the original data size.
    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
    Assert.assertEquals(data.length, keyInfo.getDataSize());
    validateData(keyName, data);
}
Also used : OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) KeyOutputStream(org.apache.hadoop.ozone.client.io.KeyOutputStream) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) Test(org.junit.Test)
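
The createKey helper is likewise defined elsewhere in the test class. One possible shape, assuming an ObjectStore field named objStore and the OzoneBucket#createKey overload that takes a size, replication type and factor (the factor used here is an assumption), would be:

private OzoneOutputStream createKey(String keyName, ReplicationType type, long size) throws Exception {
    // Open an output stream for a new key; a size of 0 allocates blocks on
    // demand, while a larger size preallocates blocks up front.
    return objStore.getVolume(volumeName)
        .getBucket(bucketName)
        .createKey(keyName, size, type, ReplicationFactor.THREE, new HashMap<>());
}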

Example 29 with OmKeyInfo

use of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in project ozone by apache.

the class TestCloseContainerHandlingByClient method testBlockWritesWithFlushAndClose.

@Test
public void testBlockWritesWithFlushAndClose() throws Exception {
    String keyName = getKeyName();
    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
    // write more than one chunk's worth of data
    byte[] data = ContainerTestHelper.getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes(UTF_8);
    key.write(data);
    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
    // build the key args used to look up this key from OM after close
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setReplicationConfig(RatisReplicationConfig.getInstance(ONE))
        .setKeyName(keyName)
        .setRefreshPipeline(true)
        .build();
    waitForContainerClose(key);
    key.write(data);
    key.flush();
    key.close();
    // read the key from OM again and match the length. Since the same data
    // was written twice, the length should be double the original data size.
    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
    Assert.assertEquals(2 * data.length, keyInfo.getDataSize());
    // Written the same data twice
    String dataString = new String(data, UTF_8);
    dataString = dataString.concat(dataString);
    validateData(keyName, dataString.getBytes(UTF_8));
}
Also used : OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) KeyOutputStream(org.apache.hadoop.ozone.client.io.KeyOutputStream) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) Test(org.junit.Test)
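
The validateData helper presumably reads the key back and compares it with the expected bytes. A minimal sketch, assuming the same objStore/volumeName/bucketName fields as above:

private void validateData(String keyName, byte[] expected) throws Exception {
    byte[] actual = new byte[expected.length];
    try (OzoneInputStream in = objStore.getVolume(volumeName)
            .getBucket(bucketName).readKey(keyName)) {
        // Read exactly expected.length bytes back from the key.
        int off = 0;
        while (off < actual.length) {
            int n = in.read(actual, off, actual.length - off);
            if (n < 0) {
                break;
            }
            off += n;
        }
    }
    Assert.assertArrayEquals(expected, actual);
}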

Example 30 with OmKeyInfo

use of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in project ozone by apache.

the class TestCloseContainerHandlingByClient method testMultiBlockWrites.

@Test
public void testMultiBlockWrites() throws Exception {
    String keyName = getKeyName();
    OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, (3 * blockSize));
    KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
    // With the initial size provided, it should have preallocated 3 blocks
    Assert.assertEquals(3, keyOutputStream.getStreamEntries().size());
    // write more than one block's worth of data (3 blocks here)
    byte[] data = ContainerTestHelper.getFixedLengthString(keyString, (3 * blockSize)).getBytes(UTF_8);
    Assert.assertEquals(data.length, 3 * blockSize);
    key.write(data);
    Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
    // build the key args used to look up this key from OM after close
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setReplicationConfig(RatisReplicationConfig.getInstance(ONE))
        .setKeyName(keyName)
        .setRefreshPipeline(true)
        .build();
    waitForContainerClose(key);
    // write 1 more block's worth of data. The write against the closed
    // container will fail and a new block will be allocated
    key.write(ContainerTestHelper.getFixedLengthString(keyString, blockSize).getBytes(UTF_8));
    key.close();
    // read the key from OM again and match the length. With the extra block
    // written after close, the length should cover all four blocks.
    OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
    List<OmKeyLocationInfo> keyLocationInfos = keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
    // Although only 3 blocks were preallocated, the write issued after the
    // containers closed hits a closed-container exception, so the remaining
    // buffered data is copied into a newly allocated block and committed,
    // giving 4 blocks in total.
    Assert.assertEquals(4, keyLocationInfos.size());
    Assert.assertEquals(4 * blockSize, keyInfo.getDataSize());
    for (OmKeyLocationInfo locationInfo : keyLocationInfos) {
        Assert.assertEquals(blockSize, locationInfo.getLength());
    }
}
Also used : OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) OzoneOutputStream(org.apache.hadoop.ozone.client.io.OzoneOutputStream) KeyOutputStream(org.apache.hadoop.ozone.client.io.KeyOutputStream) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) Test(org.junit.Test)
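
waitForContainerClose is another helper defined outside these snippets; the real implementation likely also triggers the container close through SCM, which is omitted here. A rough, assumption-laden sketch of the waiting part — collect the containers the stream has written to and poll SCM until they report CLOSED — might look like this (the SCM accessor chain and GenericTestUtils.waitFor usage are assumptions):

private void waitForContainerClose(OzoneOutputStream outputStream) throws Exception {
    KeyOutputStream keyOutputStream = (KeyOutputStream) outputStream.getOutputStream();
    // Collect the container IDs this key has written blocks to.
    List<Long> containerIds = new ArrayList<>();
    for (OmKeyLocationInfo info : keyOutputStream.getLocationInfoList()) {
        containerIds.add(info.getContainerID());
    }
    for (long id : containerIds) {
        // Poll SCM until the container is reported as CLOSED.
        GenericTestUtils.waitFor(() ->
            cluster.getStorageContainerManager().getContainerManager().getContainers()
                .stream()
                .anyMatch(c -> c.getContainerID() == id
                    && c.getState() == HddsProtos.LifeCycleState.CLOSED),
            500, 60000);
    }
}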

Aggregations

OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo) 281
Test (org.junit.Test) 96
RepeatedOmKeyInfo (org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo) 86
ArrayList (java.util.ArrayList) 62
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) 59
IOException (java.io.IOException) 58
OMClientResponse (org.apache.hadoop.ozone.om.response.OMClientResponse) 48
OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs) 47
OMException (org.apache.hadoop.ozone.om.exceptions.OMException) 40
OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo) 37
OMMetadataManager (org.apache.hadoop.ozone.om.OMMetadataManager) 33
OmKeyLocationInfoGroup (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup) 33
OzoneManagerProtocolProtos (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos) 32
OmDirectoryInfo (org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo) 30
OMResponse (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse) 30
HashMap (java.util.HashMap) 28
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline) 27
OMRequest (org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest) 25
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream) 23
Map (java.util.Map) 22