Example usage of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in the Apache Ozone project: class TestKeyManagerImpl, method openKeyWithMultipleBlocks.
@Test
public void openKeyWithMultipleBlocks() throws IOException {
// Request a key whose declared size spans ten SCM blocks.
final String keyName = UUID.randomUUID().toString();
final OmKeyArgs args = createBuilder()
    .setKeyName(keyName)
    .setDataSize(scmBlockSize * 10)
    .build();
final OpenKeySession session = writeClient.openKey(args);
// openKey should pre-allocate one block location per SCM block.
final OmKeyInfo info = session.getKeyInfo();
Assert.assertEquals(10,
    info.getLatestVersionLocations().getLocationList().size());
}
Example usage of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in the Apache Ozone project: class TestKeyManagerImpl, method testRefreshPipeline.
@Test
public void testRefreshPipeline() throws Exception {
// Mock an SCM container-location protocol that answers a batch lookup
// for the two container IDs referenced by the key's blocks.
OzoneManager ozoneManager = om;
StorageContainerLocationProtocol containerLocation =
    mock(StorageContainerLocationProtocol.class);
List<Long> ids = new ArrayList<>();
ids.add(100L);
ids.add(200L);
List<ContainerWithPipeline> pipelineBatch = new ArrayList<>();
for (Long id : ids) {
ContainerInfo info = mock(ContainerInfo.class);
when(info.getContainerID()).thenReturn(id);
ContainerWithPipeline cwp = mock(ContainerWithPipeline.class);
when(cwp.getPipeline()).thenReturn(getRandomPipeline());
when(cwp.getContainerInfo()).thenReturn(info);
pipelineBatch.add(cwp);
}
when(containerLocation.getContainerWithPipelineBatch(ids))
    .thenReturn(pipelineBatch);
ScmClient scmClient = mock(ScmClient.class);
when(scmClient.getContainerClient()).thenReturn(containerLocation);
// Build a key with three blocks: two in container 100, one in 200.
OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(
    "v1", "b1", "k1", ReplicationType.RATIS, ReplicationFactor.THREE);
Pipeline pipeline = getRandomPipeline();
List<OmKeyLocationInfo> blocks = new ArrayList<>();
blocks.add(new OmKeyLocationInfo.Builder()
    .setBlockID(new BlockID(100L, 1000L))
    .setOffset(0).setLength(100L).setPipeline(pipeline).build());
blocks.add(new OmKeyLocationInfo.Builder()
    .setBlockID(new BlockID(200L, 1000L))
    .setOffset(0).setLength(100L).setPipeline(pipeline).build());
blocks.add(new OmKeyLocationInfo.Builder()
    .setBlockID(new BlockID(100L, 2000L))
    .setOffset(0).setLength(100L).setPipeline(pipeline).build());
keyInfo.appendNewBlocks(blocks, false);
// refresh() must resolve all distinct containers in a single batch call.
KeyManagerImpl keyManagerImpl =
    new KeyManagerImpl(ozoneManager, scmClient, conf, "om1");
keyManagerImpl.refresh(keyInfo);
verify(containerLocation, times(1)).getContainerWithPipelineBatch(ids);
}
Example usage of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in the Apache Ozone project: class TestKeyManagerImpl, method testLookupKeyWithLocation.
@Test
public void testLookupKeyWithLocation() throws IOException {
String keyName = RandomStringUtils.randomAlphabetic(5);
// setSortDatanodesInPipeline(true) is the behavior under test: the
// returned pipeline's node order should depend on the client address.
OmKeyArgs keyArgs = createBuilder().setKeyName(keyName).setSortDatanodesInPipeline(true).build();
// lookup for a non-existent key
try {
keyManager.lookupKey(keyArgs, null);
Assert.fail("Lookup key should fail for non existent key");
} catch (OMException ex) {
// Only KEY_NOT_FOUND is expected; any other failure is a real error.
if (ex.getResult() != OMException.ResultCodes.KEY_NOT_FOUND) {
throw ex;
}
}
// create a key
OpenKeySession keySession = writeClient.createFile(keyArgs, false, false);
// randomly select 3 datanodes
List<DatanodeDetails> nodeList = new ArrayList<>();
nodeList.add((DatanodeDetails) scm.getClusterMap().getNode(0, null, null, null, null, 0));
nodeList.add((DatanodeDetails) scm.getClusterMap().getNode(1, null, null, null, null, 0));
nodeList.add((DatanodeDetails) scm.getClusterMap().getNode(2, null, null, null, null, 0));
// Skip (rather than fail) the test if node selection returned duplicates,
// since distinct nodes are a precondition for the ordering assertions below.
Assume.assumeFalse(nodeList.get(0).equals(nodeList.get(1)));
Assume.assumeFalse(nodeList.get(0).equals(nodeList.get(2)));
// create a pipeline using 3 datanodes
Pipeline pipeline = scm.getPipelineManager().createPipeline(RatisReplicationConfig.getInstance(ReplicationFactor.THREE), nodeList);
List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
List<OmKeyLocationInfo> locationList = keySession.getKeyInfo().getLatestVersionLocations().getLocationList();
Assert.assertEquals(1, locationList.size());
// Re-point the key's single block at the 3-node pipeline created above,
// then commit so lookups see it.
locationInfoList.add(new OmKeyLocationInfo.Builder().setPipeline(pipeline).setBlockID(new BlockID(locationList.get(0).getContainerID(), locationList.get(0).getLocalID())).build());
keyArgs.setLocationInfoList(locationInfoList);
writeClient.commitKey(keyArgs, keySession.getId());
// Stub the SCM container client so pipeline refresh during lookup
// resolves container 1 to our pipeline.
ContainerInfo containerInfo = new ContainerInfo.Builder().setContainerID(1L).setPipelineID(pipeline.getId()).build();
List<ContainerWithPipeline> containerWithPipelines = Arrays.asList(new ContainerWithPipeline(containerInfo, pipeline));
when(mockScmContainerClient.getContainerWithPipelineBatch(Arrays.asList(1L))).thenReturn(containerWithPipelines);
OmKeyInfo key = keyManager.lookupKey(keyArgs, null);
Assert.assertEquals(key.getKeyName(), keyName);
List<OmKeyLocationInfo> keyLocations = key.getLatestVersionLocations().getLocationList();
// Capture the pipeline's node order from an address-less lookup;
// leader/followers here are just positional labels for the assertions.
DatanodeDetails leader = keyLocations.get(0).getPipeline().getFirstNode();
DatanodeDetails follower1 = keyLocations.get(0).getPipeline().getNodes().get(1);
DatanodeDetails follower2 = keyLocations.get(0).getPipeline().getNodes().get(2);
Assert.assertNotEquals(leader, follower1);
Assert.assertNotEquals(follower1, follower2);
// lookup key, leader as client
OmKeyInfo key1 = keyManager.lookupKey(keyArgs, leader.getIpAddress());
Assert.assertEquals(leader, key1.getLatestVersionLocations().getLocationList().get(0).getPipeline().getClosestNode());
// lookup key, follower1 as client
OmKeyInfo key2 = keyManager.lookupKey(keyArgs, follower1.getIpAddress());
Assert.assertEquals(follower1, key2.getLatestVersionLocations().getLocationList().get(0).getPipeline().getClosestNode());
// lookup key, follower2 as client
OmKeyInfo key3 = keyManager.lookupKey(keyArgs, follower2.getIpAddress());
Assert.assertEquals(follower2, key3.getLatestVersionLocations().getLocationList().get(0).getPipeline().getClosestNode());
// lookup key, random node as client
// NOTE(review): for an unknown client address the closest node appears to
// fall back to the original first node — confirm against sorting logic.
OmKeyInfo key4 = keyManager.lookupKey(keyArgs, "/d=default-drack/127.0.0.1");
Assert.assertEquals(leader, key4.getLatestVersionLocations().getLocationList().get(0).getPipeline().getClosestNode());
}
Example usage of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in the Apache Ozone project: class TestOMEpochForNonRatis, method testUniqueTrxnIndexOnOMRestart.
@Test
public void testUniqueTrxnIndexOnOMRestart() throws Exception {
// When OM is restarted, the transaction index for requests should not
// start from 0. It should incrementally increase from the last
// transaction index which was stored in DB before restart.
String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
String keyName = "key" + RandomStringUtils.randomNumeric(5);
OzoneManager om = cluster.getOzoneManager();
OzoneClient client = cluster.getClient();
ObjectStore objectStore = client.getObjectStore();
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
// try-with-resources so the OM client transport is released even when an
// assertion fails mid-test (previously the client was never closed).
try (OzoneManagerProtocolClientSideTranslatorPB omClient =
    new OzoneManagerProtocolClientSideTranslatorPB(
        OmTransportFactory.create(conf, ugi, null),
        RandomStringUtils.randomAscii(5))) {
objectStore.createVolume(volumeName);
// Verify that the last transactionIndex stored in DB after volume
// creation equals the transaction index corresponding to volume's
// objectID. Also, the volume transaction index should be 1 as this is
// the first transaction in this cluster.
OmVolumeArgs volumeInfo = omClient.getVolumeInfo(volumeName);
long volumeTrxnIndex = OmUtils.getTxIdFromObjectId(volumeInfo.getObjectID());
Assert.assertEquals(1, volumeTrxnIndex);
Assert.assertEquals(volumeTrxnIndex, om.getLastTrxnIndexForNonRatis());
OzoneVolume ozoneVolume = objectStore.getVolume(volumeName);
ozoneVolume.createBucket(bucketName);
// Verify last transactionIndex is updated after bucket creation
OmBucketInfo bucketInfo = omClient.getBucketInfo(volumeName, bucketName);
long bucketTrxnIndex = OmUtils.getTxIdFromObjectId(bucketInfo.getObjectID());
Assert.assertEquals(2, bucketTrxnIndex);
Assert.assertEquals(bucketTrxnIndex, om.getLastTrxnIndexForNonRatis());
// Restart the OM and create new object
cluster.restartOzoneManager();
String data = "random data";
// try-with-resources guarantees the stream is closed (and the key
// committed or aborted) even if the write throws.
try (OzoneOutputStream ozoneOutputStream = ozoneVolume.getBucket(bucketName)
    .createKey(keyName, data.length(), ReplicationType.RATIS,
        ReplicationFactor.ONE, new HashMap<>())) {
ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length());
}
// Verify last transactionIndex is updated after key creation and the
// transaction index after restart is incremented from the last
// transaction index before restart.
OmKeyInfo omKeyInfo = omClient.lookupKey(new OmKeyArgs.Builder()
    .setVolumeName(volumeName).setBucketName(bucketName)
    .setKeyName(keyName).setRefreshPipeline(true).build());
long keyTrxnIndex = OmUtils.getTxIdFromObjectId(omKeyInfo.getObjectID());
Assert.assertEquals(3, keyTrxnIndex);
// Key commit is a separate transaction. Hence, the last trxn index in DB
// should be 1 more than KeyTrxnIndex
Assert.assertEquals(4, om.getLastTrxnIndexForNonRatis());
}
}
Example usage of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in the Apache Ozone project: class TestObjectStoreWithFSO, method verifyKeyInFileTable.
/**
 * Asserts the presence (or absence) of a file entry in the FSO file table.
 *
 * @param fileTable OM file table keyed by "&lt;parentID&gt;/&lt;fileName&gt;"
 * @param fileName  the file's name component
 * @param parentID  object ID of the parent directory
 * @param isEmpty   when true, asserts the entry does NOT exist
 * @throws IOException if the table read fails
 */
private void verifyKeyInFileTable(Table<String, OmKeyInfo> fileTable, String fileName, long parentID, boolean isEmpty) throws IOException {
// File-table keys are "<parentID>/<fileName>" (OM_KEY_PREFIX is the separator).
String dbFileKey = parentID + OM_KEY_PREFIX + fileName;
OmKeyInfo omKeyInfo = fileTable.get(dbFileKey);
if (isEmpty) {
Assert.assertNull("Table is not empty!", omKeyInfo);
} else {
Assert.assertNotNull("Table is empty!", omKeyInfo);
// The stored key name should be exactly the file name. (An earlier
// comment mentioned startsWith and a <clientID> suffix; that applies to
// the open-file table, not this committed-file table.)
Assert.assertEquals("Invalid Key: " + omKeyInfo.getObjectInfo(), omKeyInfo.getKeyName(), fileName);
Assert.assertEquals("Invalid Key", parentID, omKeyInfo.getParentObjectID());
}
}
Aggregations