Usage example of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in the Apache Ozone project.
The following snippet is from class TestOMDBUpdatesHandler, method testDelete.
/**
 * Verifies that DELETE events captured from the OM DB update log carry the
 * deleted value when the entry existed in Recon's copy of the OM DB, and a
 * {@code null} value when it did not.
 */
@Test
public void testDelete() throws Exception {
  // Write 1 volume, 1 key into source and target OM DBs.
  String volumeKey = omMetadataManager.getVolumeKey("sampleVol");
  String nonExistVolumeKey = omMetadataManager.getVolumeKey("nonExistingVolume");
  OmVolumeArgs args = OmVolumeArgs.newBuilder()
      .setVolume("sampleVol")
      .setAdminName("bilbo")
      .setOwnerName("bilbo")
      .build();
  omMetadataManager.getVolumeTable().put(volumeKey, args);
  reconOmMetadataManager.getVolumeTable().put(volumeKey, args);

  OmKeyInfo omKeyInfo = getOmKeyInfo("sampleVol", "bucketOne", "key_one");
  omMetadataManager.getKeyTable(getBucketLayout())
      .put("/sampleVol/bucketOne/key_one", omKeyInfo);
  reconOmMetadataManager.getKeyTable(getBucketLayout())
      .put("/sampleVol/bucketOne/key_one", omKeyInfo);

  // Delete the volume and key from target DB.
  omMetadataManager.getKeyTable(getBucketLayout())
      .delete("/sampleVol/bucketOne/key_one");
  omMetadataManager.getVolumeTable().delete(volumeKey);
  // Delete a non-existing volume and key. Reuse the key computed above
  // instead of recomputing it (was duplicated).
  omMetadataManager.getKeyTable(getBucketLayout())
      .delete("/sampleVol/bucketOne/key_two");
  omMetadataManager.getVolumeTable().delete(nonExistVolumeKey);

  List<byte[]> writeBatches = getBytesFromOmMetaManager(3);
  OMDBUpdatesHandler omdbUpdatesHandler = captureEvents(writeBatches);
  List<OMDBUpdateEvent> events = omdbUpdatesHandler.getEvents();
  assertEquals(4, events.size());

  // Existing key: DELETE event carries the value present in Recon's DB.
  OMDBUpdateEvent keyEvent = events.get(0);
  assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, keyEvent.getAction());
  assertEquals("/sampleVol/bucketOne/key_one", keyEvent.getKey());
  assertEquals(omKeyInfo, keyEvent.getValue());

  // Existing volume: DELETE event carries the volume args.
  OMDBUpdateEvent volEvent = events.get(1);
  assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, volEvent.getAction());
  assertEquals(volumeKey, volEvent.getKey());
  assertNotNull(volEvent.getValue());
  OmVolumeArgs volumeInfo = (OmVolumeArgs) volEvent.getValue();
  assertEquals("sampleVol", volumeInfo.getVolume());

  // Assert the values of non existent keys are set to null.
  OMDBUpdateEvent nonExistKey = events.get(2);
  assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, nonExistKey.getAction());
  assertEquals("/sampleVol/bucketOne/key_two", nonExistKey.getKey());
  assertNull(nonExistKey.getValue());

  OMDBUpdateEvent nonExistVolume = events.get(3);
  assertEquals(OMDBUpdateEvent.OMDBUpdateAction.DELETE, nonExistVolume.getAction());
  assertEquals(nonExistVolumeKey, nonExistVolume.getKey());
  assertNull(nonExistVolume.getValue());
}
Usage example of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in the Apache Ozone project.
The following snippet is from class TestOMDBUpdatesHandler, method testOperateOnSameEntry.
/**
 * Verifies event classification when the same DB entry is operated on
 * repeatedly: the first write is a PUT, subsequent writes are UPDATEs that
 * carry the previous value, deletes are DELETEs, and a re-insert after a
 * delete is reported as a PUT whose old value is the last written value.
 */
@Test
public void testOperateOnSameEntry() throws Exception {
  // Create 1 volume, 1 key and write to source OM DB.
  String volumeKey = omMetadataManager.getVolumeKey("sampleVol");
  OmVolumeArgs args = OmVolumeArgs.newBuilder()
      .setVolume("sampleVol")
      .setAdminName("bilbo")
      .setOwnerName("bilbo")
      .build();
  omMetadataManager.getVolumeTable().put(volumeKey, args);

  String keyPath = "/sampleVol/bucketOne/key";
  OmKeyInfo firstValue = getOmKeyInfo("sampleVol", "bucketOne", "key");
  omMetadataManager.getKeyTable(getBucketLayout()).put(keyPath, firstValue);
  OmKeyInfo secondValue = getOmKeyInfo("sampleVol", "bucketOne", "key_new");
  omMetadataManager.getKeyTable(getBucketLayout()).put(keyPath, secondValue);
  OmKeyInfo thirdValue = getOmKeyInfo("sampleVol", "bucketOne", "key_new2");
  omMetadataManager.getKeyTable(getBucketLayout()).put(keyPath, thirdValue);
  // Delete twice (second delete is a no-op on the data but still logged),
  // then re-insert the last value.
  omMetadataManager.getKeyTable(getBucketLayout()).delete(keyPath);
  omMetadataManager.getKeyTable(getBucketLayout()).delete(keyPath);
  omMetadataManager.getKeyTable(getBucketLayout()).put(keyPath, thirdValue);

  List<byte[]> writeBatches = getBytesFromOmMetaManager(0);
  OMDBUpdatesHandler handler = captureEvents(writeBatches);
  List<OMDBUpdateEvent> events = handler.getEvents();
  assertEquals(7, events.size());

  // Event 0: initial volume insert -> PUT.
  OMDBUpdateEvent volumePut = events.get(0);
  assertEquals(PUT, volumePut.getAction());
  assertEquals(volumeKey, volumePut.getKey());
  assertEquals(args.getVolume(), ((OmVolumeArgs) volumePut.getValue()).getVolume());

  // Event 1: first key insert -> PUT with no previous value.
  OMDBUpdateEvent firstPut = events.get(1);
  assertEquals(PUT, firstPut.getAction());
  assertEquals("/sampleVol/bucketOne/key", firstPut.getKey());
  assertEquals("key", ((OmKeyInfo) firstPut.getValue()).getKeyName());
  assertNull(firstPut.getOldValue());

  // Event 2: overwrite -> UPDATE carrying the first value as old value.
  OMDBUpdateEvent firstUpdate = events.get(2);
  assertEquals(UPDATE, firstUpdate.getAction());
  assertEquals("/sampleVol/bucketOne/key", firstUpdate.getKey());
  assertEquals("key_new", ((OmKeyInfo) firstUpdate.getValue()).getKeyName());
  assertNotNull(firstUpdate.getOldValue());
  assertEquals("key", ((OmKeyInfo) firstUpdate.getOldValue()).getKeyName());

  // Event 3: second overwrite -> UPDATE carrying the second value.
  OMDBUpdateEvent secondUpdate = events.get(3);
  assertEquals(UPDATE, secondUpdate.getAction());
  assertEquals("/sampleVol/bucketOne/key", secondUpdate.getKey());
  assertEquals("key_new2", ((OmKeyInfo) secondUpdate.getValue()).getKeyName());
  assertNotNull(secondUpdate.getOldValue());
  assertEquals("key_new", ((OmKeyInfo) secondUpdate.getOldValue()).getKeyName());

  // Events 4 and 5: both deletes reported, each with the last stored value.
  OMDBUpdateEvent firstDelete = events.get(4);
  assertEquals(DELETE, firstDelete.getAction());
  assertEquals("/sampleVol/bucketOne/key", firstDelete.getKey());
  assertEquals("key_new2", ((OmKeyInfo) firstDelete.getValue()).getKeyName());

  OMDBUpdateEvent secondDelete = events.get(5);
  assertEquals(DELETE, secondDelete.getAction());
  assertEquals("/sampleVol/bucketOne/key", secondDelete.getKey());
  assertEquals("key_new2", ((OmKeyInfo) secondDelete.getValue()).getKeyName());

  // Event 6: re-insert after delete -> PUT, old value is the prior value.
  OMDBUpdateEvent reinsert = events.get(6);
  assertEquals(PUT, reinsert.getAction());
  assertEquals("/sampleVol/bucketOne/key", reinsert.getKey());
  assertEquals("key_new2", ((OmKeyInfo) reinsert.getValue()).getKeyName());
  assertNotNull(reinsert.getOldValue());
  assertEquals("key_new2", ((OmKeyInfo) reinsert.getOldValue()).getKeyName());
}
Usage example of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in the Apache Ozone project.
The following snippet is from class TestCloseContainerHandlingByClient, method testBlockWritesCloseConsistency.
/**
 * Writes more than one chunk of data, waits for the container to close,
 * then confirms that the key length reported by OM matches the data written
 * and that the stored bytes are intact.
 */
@Test
public void testBlockWritesCloseConsistency() throws Exception {
  String keyName = getKeyName();
  OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
  // write data more than 1 chunk
  int payloadSize = chunkSize + chunkSize / 2;
  byte[] payload =
      ContainerTestHelper.getFixedLengthString(keyString, payloadSize)
          .getBytes(UTF_8);
  key.write(payload);
  Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
  // get the name of a valid container
  // NOTE(review): these lookup args use Standalone replication while the
  // sibling tests use Ratis — confirm this asymmetry is intentional.
  OmKeyArgs lookupArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE))
      .setKeyName(keyName)
      .setRefreshPipeline(true)
      .build();
  waitForContainerClose(key);
  key.close();
  // read the key from OM again and match the length. The length will still
  // be equal to the original data size.
  OmKeyInfo keyDetails = cluster.getOzoneManager().lookupKey(lookupArgs);
  Assert.assertEquals(payload.length, keyDetails.getDataSize());
  validateData(keyName, payload);
}
Usage example of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in the Apache Ozone project.
The following snippet is from class TestCloseContainerHandlingByClient, method testBlockWritesWithFlushAndClose.
/**
 * Writes more than one chunk, waits for the container to close, then writes
 * the same data again with a flush before close. Confirms OM reports twice
 * the original size and that the stored bytes equal the data concatenated
 * with itself.
 */
@Test
public void testBlockWritesWithFlushAndClose() throws Exception {
  String keyName = getKeyName();
  OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0);
  // write data more than 1 chunk
  int payloadSize = chunkSize + chunkSize / 2;
  byte[] payload =
      ContainerTestHelper.getFixedLengthString(keyString, payloadSize)
          .getBytes(UTF_8);
  key.write(payload);
  Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
  // get the name of a valid container
  OmKeyArgs lookupArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setReplicationConfig(RatisReplicationConfig.getInstance(ONE))
      .setKeyName(keyName)
      .setRefreshPipeline(true)
      .build();
  waitForContainerClose(key);
  // Second write lands after the container close; flush then close.
  key.write(payload);
  key.flush();
  key.close();
  // read the key from OM again and match the length. The length will be
  // twice the original data size since the same data was written twice.
  OmKeyInfo keyDetails = cluster.getOzoneManager().lookupKey(lookupArgs);
  Assert.assertEquals(2 * payload.length, keyDetails.getDataSize());
  // Written the same data twice
  String expected = new String(payload, UTF_8);
  expected = expected.concat(expected);
  validateData(keyName, expected.getBytes(UTF_8));
}
Usage example of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in the Apache Ozone project.
The following snippet is from class TestCloseContainerHandlingByClient, method testMultiBlockWrites.
/**
 * Writes three blocks of data into a key preallocated for three blocks,
 * forces the containers to close, writes one more block, and verifies that
 * the extra data ends up in a newly allocated fourth block.
 */
@Test
public void testMultiBlockWrites() throws Exception {
String keyName = getKeyName();
OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, (3 * blockSize));
KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream();
// With the initial size provided, it should have preallocated 3 blocks.
Assert.assertEquals(3, keyOutputStream.getStreamEntries().size());
// write data more than 1 block
byte[] data = ContainerTestHelper.getFixedLengthString(keyString, (3 * blockSize)).getBytes(UTF_8);
Assert.assertEquals(data.length, 3 * blockSize);
key.write(data);
Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream);
// get the name of a valid container
OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setReplicationConfig(RatisReplicationConfig.getInstance(ONE)).setKeyName(keyName).setRefreshPipeline(true).build();
waitForContainerClose(key);
// write 1 more block worth of data. It will fail and new block will be
// allocated
key.write(ContainerTestHelper.getFixedLengthString(keyString, blockSize).getBytes(UTF_8));
key.close();
// read the key from OM again and match the length. The length will still
// be equal to the total data size written (4 blocks).
OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
List<OmKeyLocationInfo> keyLocationInfos = keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
// Though only three blocks were written initially, the final write hits
// a closed-container exception and the remaining data in the
// chunkOutputStream buffer is copied into a newly allocated block and
// committed, giving four blocks in total.
Assert.assertEquals(4, keyLocationInfos.size());
Assert.assertEquals(4 * blockSize, keyInfo.getDataSize());
for (OmKeyLocationInfo locationInfo : keyLocationInfos) {
// Every committed block should be exactly one blockSize long.
Assert.assertEquals(blockSize, locationInfo.getLength());
}
}
Aggregations