Use of org.apache.hadoop.ozone.om.helpers.OmKeyArgs in project ozone by apache.
The class TestStorageContainerManagerHA, method testPutKey.
public void testPutKey() throws Exception {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  Instant testStartTime = Instant.now();
  ObjectStore store =
      OzoneClientFactory.getRpcClient(cluster.getConf()).getObjectStore();
  String value = "sample value";
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  String keyName = UUID.randomUUID().toString();
  OzoneOutputStream out = bucket.createKey(keyName,
      value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
  out.write(value.getBytes(UTF_8));
  out.close();
  OzoneKey key = bucket.getKey(keyName);
  Assert.assertEquals(keyName, key.getName());
  OzoneInputStream is = bucket.readKey(keyName);
  byte[] fileContent = new byte[value.getBytes(UTF_8).length];
  // read(byte[]) may return fewer bytes than requested; assert we got them all.
  Assert.assertEquals(fileContent.length, is.read(fileContent));
  Assert.assertEquals(value, new String(fileContent, UTF_8));
  Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
  Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
  is.close();
  final OmKeyArgs keyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setReplicationConfig(
          RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
      .setKeyName(keyName)
      .setRefreshPipeline(true)
      .build();
  final OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
  final List<OmKeyLocationInfo> keyLocationInfos =
      keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
  long index = -1;
  for (StorageContainerManager scm : cluster.getStorageContainerManagers()) {
    if (scm.checkLeader()) {
      index = getLastAppliedIndex(scm);
    }
  }
  Assert.assertNotEquals(-1, index);
  long finalIndex = index;
  // Ensure all follower SCMs have caught up with the leader.
  GenericTestUtils.waitFor(() -> areAllScmInSync(finalIndex), 100, 10000);
  final long containerID = keyLocationInfos.get(0).getContainerID();
  for (int k = 0; k < numOfSCMs; k++) {
    StorageContainerManager scm = cluster.getStorageContainerManagers().get(k);
    // Take a Ratis snapshot so in-memory state is flushed to the DB on each SCM.
    ((SCMRatisServerImpl) scm.getScmHAManager().getRatisServer())
        .getStateMachine().takeSnapshot();
    Assert.assertTrue(scm.getContainerManager()
        .containerExist(ContainerID.valueOf(containerID)));
    Assert.assertNotNull(scm.getScmMetadataStore().getContainerTable()
        .get(ContainerID.valueOf(containerID)));
  }
}
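The OmKeyArgs usage this page tracks is the Builder call in the middle of the test. Pulled out on its own, the lookup pattern is only a few lines; the following is a minimal sketch, assuming an OzoneManagerProtocol handle named omClient (that name is illustrative, not from the test):

// Build the key descriptor and ask the OM for its block locations.
OmKeyArgs args = new OmKeyArgs.Builder()
    .setVolumeName(volumeName)
    .setBucketName(bucketName)
    .setKeyName(keyName)
    .setRefreshPipeline(true)  // refresh cached pipeline info before returning
    .build();
OmKeyInfo info = omClient.lookupKey(args);
// The latest block locations carry the container IDs the test inspects later.
List<OmKeyLocationInfo> blocks =
    info.getLatestVersionLocations().getBlocksLatestVersionOnly();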
Use of org.apache.hadoop.ozone.om.helpers.OmKeyArgs in project ozone by apache.
The class ChunkKeyHandler, method execute.
@Override
protected void execute(OzoneClient client, OzoneAddress address)
    throws IOException, OzoneClientException {
  containerOperationClient =
      new ContainerOperationClient(createOzoneConfiguration());
  xceiverClientManager = containerOperationClient.getXceiverClientManager();
  ozoneManagerClient =
      client.getObjectStore().getClientProxy().getOzoneManagerClient();
  address.ensureKeyAddress();
  JsonElement element;
  JsonObject result = new JsonObject();
  String volumeName = address.getVolumeName();
  String bucketName = address.getBucketName();
  String keyName = address.getKeyName();
  List<ContainerProtos.ChunkInfo> tempchunks = null;
  List<ChunkDetails> chunkDetailsList = new ArrayList<>();
  HashSet<String> chunkPaths = new HashSet<>();
  OmKeyArgs keyArgs = new OmKeyArgs.Builder()
      .setVolumeName(volumeName)
      .setBucketName(bucketName)
      .setKeyName(keyName)
      .setRefreshPipeline(true)
      .build();
  OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
  // Query the OM for the key locations: this yields the containerID and
  // localID for each block of the given key.
  List<OmKeyLocationInfo> locationInfos =
      keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
  // A zero-sized key has no block locations.
  if (locationInfos.isEmpty()) {
    System.out.println("No Key Locations Found");
    return;
  }
  ContainerLayoutVersion containerLayoutVersion =
      ContainerLayoutVersion.getConfiguredVersion(getConf());
  JsonArray responseArrayList = new JsonArray();
  for (OmKeyLocationInfo keyLocation : locationInfos) {
    ContainerChunkInfo containerChunkInfoVerbose = new ContainerChunkInfo();
    ContainerChunkInfo containerChunkInfo = new ContainerChunkInfo();
    long containerId = keyLocation.getContainerID();
    chunkPaths.clear();
    Pipeline pipeline = keyLocation.getPipeline();
    if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
      // Downgrade to STANDALONE so each datanode can be read individually.
      pipeline = Pipeline.newBuilder(pipeline)
          .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE))
          .build();
    }
    xceiverClient = xceiverClientManager.acquireClientForReadData(pipeline);
    // The datanodes are then queried for chunk information; combining the OM,
    // SCM and datanode responses gives the full chunk location picture.
    ContainerProtos.DatanodeBlockID datanodeBlockID =
        keyLocation.getBlockID().getDatanodeBlockIDProtobuf();
    // Issue getBlock on all nodes of the pipeline.
    HashMap<DatanodeDetails, ContainerProtos.GetBlockResponseProto> responses = null;
    try {
      responses = ContainerProtocolCalls.getBlockFromAllNodes(
          xceiverClient, datanodeBlockID, keyLocation.getToken());
    } catch (InterruptedException e) {
      LOG.error("Execution interrupted", e);
      Thread.currentThread().interrupt();
    }
    if (responses == null) {
      // getBlock was interrupted; skip this location instead of dereferencing null.
      xceiverClientManager.releaseClientForReadData(xceiverClient, false);
      continue;
    }
    JsonArray responseFromAllNodes = new JsonArray();
    for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto> entry
        : responses.entrySet()) {
      JsonObject jsonObj = new JsonObject();
      if (entry.getValue() == null) {
        LOG.error("Can't execute getBlock on this node");
        continue;
      }
      tempchunks = entry.getValue().getBlockData().getChunksList();
      ContainerProtos.ContainerDataProto containerData =
          containerOperationClient.readContainer(
              keyLocation.getContainerID(), keyLocation.getPipeline());
      for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) {
        String fileName = containerLayoutVersion.getChunkFile(
            new File(getChunkLocationPath(containerData.getContainerPath())),
            keyLocation.getBlockID(),
            ChunkInfo.getFromProtoBuf(chunkInfo)).toString();
        chunkPaths.add(fileName);
        ChunkDetails chunkDetails = new ChunkDetails();
        chunkDetails.setChunkName(fileName);
        chunkDetails.setChunkOffset(chunkInfo.getOffset());
        chunkDetailsList.add(chunkDetails);
      }
      containerChunkInfoVerbose.setContainerPath(containerData.getContainerPath());
      containerChunkInfoVerbose.setPipeline(keyLocation.getPipeline());
      containerChunkInfoVerbose.setChunkInfos(chunkDetailsList);
      containerChunkInfo.setFiles(chunkPaths);
      containerChunkInfo.setPipelineID(keyLocation.getPipeline().getId().getId());
      Gson gson = new GsonBuilder().create();
      if (isVerbose()) {
        element = gson.toJsonTree(containerChunkInfoVerbose);
      } else {
        element = gson.toJsonTree(containerChunkInfo);
      }
      jsonObj.addProperty("Datanode-HostName", entry.getKey().getHostName());
      jsonObj.addProperty("Datanode-IP", entry.getKey().getIpAddress());
      jsonObj.addProperty("Container-ID", containerId);
      jsonObj.addProperty("Block-ID", keyLocation.getLocalID());
      jsonObj.add("Locations", element);
      responseFromAllNodes.add(jsonObj);
    }
    // Release once per acquire, after all per-node responses are processed.
    xceiverClientManager.releaseClientForReadData(xceiverClient, false);
    responseArrayList.add(responseFromAllNodes);
  }
  result.add("KeyLocations", responseArrayList);
  Gson gson2 = new GsonBuilder().setPrettyPrinting().create();
  String prettyJson = gson2.toJson(result);
  System.out.println(prettyJson);
}
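One detail in execute() deserves emphasis: each acquireClientForReadData must be balanced by exactly one releaseClientForReadData, which is why the release above sits after the per-node loop rather than inside it. A try/finally shape makes the pairing explicit; a minimal sketch under the same API, with the read calls elided:

XceiverClientSpi readClient = xceiverClientManager.acquireClientForReadData(pipeline);
try {
  // issue getBlock / read calls through readClient here
} finally {
  // false: keep the cached client valid for future readers
  xceiverClientManager.releaseClientForReadData(readClient, false);
}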
Use of org.apache.hadoop.ozone.om.helpers.OmKeyArgs in project ozone by apache.
The class TestKeyManagerImpl, method testCreateDirectory.
@Test
public void testCreateDirectory() throws IOException {
  // Create a directory whose parent directories do not exist yet.
  StringBuffer keyNameBuf = new StringBuffer();
  keyNameBuf.append(RandomStringUtils.randomAlphabetic(5));
  for (int i = 0; i < 5; i++) {
    keyNameBuf.append("/").append(RandomStringUtils.randomAlphabetic(5));
  }
  String keyName = keyNameBuf.toString();
  // Build keyArgs from the full path so the nested directory is created.
  OmKeyArgs keyArgs = createBuilder().setKeyName(keyName).build();
  writeClient.createDirectory(keyArgs);
  Path path = Paths.get(keyName);
  while (path != null) {
    // Verify each parent directory along the path was created.
    keyArgs = createBuilder().setKeyName(path.toString()).build();
    Assert.assertTrue(keyManager.getFileStatus(keyArgs).isDirectory());
    path = path.getParent();
  }
  // Make sure createDirectory fails when the target already exists as a file.
  keyName = RandomStringUtils.randomAlphabetic(5);
  keyArgs = createBuilder().setKeyName(keyName).build();
  OpenKeySession keySession = writeClient.openKey(keyArgs);
  keyArgs.setLocationInfoList(
      keySession.getKeyInfo().getLatestVersionLocations().getLocationList());
  writeClient.commitKey(keyArgs, keySession.getId());
  try {
    writeClient.createDirectory(keyArgs);
    Assert.fail("Creation should fail for directory.");
  } catch (OMException e) {
    Assert.assertEquals(OMException.ResultCodes.FILE_ALREADY_EXISTS, e.getResult());
  }
  // Create a directory whose parent is the root.
  keyName = RandomStringUtils.randomAlphabetic(5);
  keyArgs = createBuilder().setKeyName(keyName).build();
  writeClient.createDirectory(keyArgs);
  OzoneFileStatus fileStatus = keyManager.getFileStatus(keyArgs);
  Assert.assertTrue(fileStatus.isDirectory());
  Assert.assertTrue(fileStatus.getKeyInfo()
      .getKeyLocationVersions().get(0).getLocationList().isEmpty());
}
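The verification loop leans on java.nio.Path.getParent() to enumerate every ancestor of the created directory. That walk is worth knowing on its own; a self-contained sketch:

import java.nio.file.Path;
import java.nio.file.Paths;

public class ParentWalk {
  public static void main(String[] args) {
    // Visits "a/b/c", then "a/b", then "a"; getParent() returns null at the top.
    Path path = Paths.get("a/b/c");
    while (path != null) {
      System.out.println(path);
      path = path.getParent();
    }
  }
}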
Use of org.apache.hadoop.ozone.om.helpers.OmKeyArgs in project ozone by apache.
The class TestKeyManagerImpl, method testCheckAccessForFileKey.
@Test
public void testCheckAccessForFileKey() throws Exception {
  // GIVEN: a committed file key.
  OmKeyArgs keyArgs = createBuilder().setKeyName("testdir/deep/NOTICE.txt").build();
  OpenKeySession keySession = writeClient.createFile(keyArgs, false, true);
  keyArgs.setLocationInfoList(
      keySession.getKeyInfo().getLatestVersionLocations().getLocationList());
  writeClient.commitKey(keyArgs, keySession.getId());
  reset(mockScmContainerClient);
  OzoneObj fileKey = OzoneObjInfo.Builder.fromKeyArgs(keyArgs)
      .setStoreType(OzoneObj.StoreType.OZONE)
      .build();
  RequestContext context = currentUserReads();
  // WHEN: the current user checks read access on the key.
  boolean access = keyManager.checkAccess(fileKey, context);
  // THEN: access is granted without any SCM round trip.
  Assert.assertTrue(access);
  verify(mockScmContainerClient, never()).getContainerWithPipelineBatch(any());
}
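Stripped of the test fixture, the access check is two steps: map the key to an OzoneObj, then ask the key manager. A minimal sketch reusing only types shown above (createBuilder, currentUserReads and keyManager come from the test class):

// Describe the key as an Ozone object and check read access for the caller.
OzoneObj obj = OzoneObjInfo.Builder.fromKeyArgs(keyArgs)
    .setStoreType(OzoneObj.StoreType.OZONE)
    .build();
boolean allowed = keyManager.checkAccess(obj, currentUserReads());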
Use of org.apache.hadoop.ozone.om.helpers.OmKeyArgs in project ozone by apache.
The class TestKeyManagerImpl, method testListStatusWithDeletedEntriesInCache.
@Test
public void testListStatusWithDeletedEntriesInCache() throws Exception {
  String prefixKey = "key-";
  TreeSet<String> existKeySet = new TreeSet<>();
  TreeSet<String> deletedKeySet = new TreeSet<>();
  for (int i = 1; i <= 100; i++) {
    if (i % 2 == 0) {
      OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME,
          prefixKey + i, 1000L, HddsProtos.ReplicationType.RATIS, ONE,
          metadataManager);
      existKeySet.add(prefixKey + i);
    } else {
      OMRequestTestUtils.addKeyToTableCache(VOLUME_NAME, BUCKET_NAME,
          prefixKey + i, HddsProtos.ReplicationType.RATIS, ONE,
          metadataManager);
      String key = metadataManager.getOzoneKey(VOLUME_NAME, BUCKET_NAME,
          prefixKey + i);
      // Mark as deleted in the cache: an absent CacheValue acts as a tombstone.
      metadataManager.getKeyTable(getDefaultBucketLayout())
          .addCacheEntry(new CacheKey<>(key),
              new CacheValue<>(Optional.absent(), 2L));
      deletedKeySet.add(key);
    }
  }
  OmKeyArgs rootDirArgs = createKeyArgs("");
  List<OzoneFileStatus> fileStatuses =
      keyManager.listStatus(rootDirArgs, true, "", 1000);
  // Should only get entries that are not marked as deleted: 50 of 100.
  Assert.assertEquals(50, fileStatuses.size());
  // Test startKey.
  fileStatuses = keyManager.listStatus(rootDirArgs, true, prefixKey, 1000);
  // Should only get entries that are not marked as deleted.
  Assert.assertEquals(50, fileStatuses.size());
  // Verify the result matches the keys that still exist.
  TreeSet<String> expectedKeys = new TreeSet<>();
  for (OzoneFileStatus fileStatus : fileStatuses) {
    String keyName = fileStatus.getKeyInfo().getKeyName();
    expectedKeys.add(keyName);
    Assert.assertTrue(keyName.startsWith(prefixKey));
  }
  Assert.assertEquals(expectedKeys, existKeySet);
  // Sanity check: existKeySet must not intersect deletedKeySet.
  Assert.assertEquals(0, Sets.intersection(existKeySet, deletedKeySet).size());
  // Next, mark half of the remaining entries as deleted.
  boolean doDelete = false;
  for (String key : existKeySet) {
    if (doDelete) {
      String ozoneKey = metadataManager.getOzoneKey(VOLUME_NAME, BUCKET_NAME, key);
      metadataManager.getKeyTable(getDefaultBucketLayout())
          .addCacheEntry(new CacheKey<>(ozoneKey),
              new CacheValue<>(Optional.absent(), 2L));
      deletedKeySet.add(key);
    }
    doDelete = !doDelete;
  }
  // Update existKeySet.
  existKeySet.removeAll(deletedKeySet);
  fileStatuses = keyManager.listStatus(rootDirArgs, true, "", 1000);
  // Should only get entries that are not marked as deleted: 25 remain.
  Assert.assertEquals(50 / 2, fileStatuses.size());
  // Verify the result.
  expectedKeys.clear();
  for (OzoneFileStatus fileStatus : fileStatuses) {
    String keyName = fileStatus.getKeyInfo().getKeyName();
    expectedKeys.add(keyName);
    Assert.assertTrue(keyName.startsWith(prefixKey));
  }
  Assert.assertEquals(expectedKeys, existKeySet);
  // Test pagination.
  final int batchSize = 5;
  String startKey = "";
  expectedKeys.clear();
  do {
    fileStatuses = keyManager.listStatus(rootDirArgs, true, startKey, batchSize);
    // Overlapping startKey entries across batches are harmless because
    // results are collected in a set.
    for (OzoneFileStatus fileStatus : fileStatuses) {
      startKey = fileStatus.getKeyInfo().getKeyName();
      expectedKeys.add(startKey);
      Assert.assertTrue(startKey.startsWith(prefixKey));
    }
    // fileStatuses.size() == batchSize: there may be another batch;
    // fileStatuses.size() < batchSize: this was the last batch.
  } while (fileStatuses.size() == batchSize);
  Assert.assertEquals(expectedKeys, existKeySet);
  // Clean up by marking the remaining entries as deleted.
  for (String key : existKeySet) {
    String ozoneKey = metadataManager.getOzoneKey(VOLUME_NAME, BUCKET_NAME, key);
    metadataManager.getKeyTable(getDefaultBucketLayout())
        .addCacheEntry(new CacheKey<>(ozoneKey),
            new CacheValue<>(Optional.absent(), 2L));
    deletedKeySet.add(key);
  }
  // Update existKeySet.
  existKeySet.removeAll(deletedKeySet);
  Assert.assertTrue(existKeySet.isEmpty());
}
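The pagination idiom at the end of the test generalizes to any listStatus-style API: feed the last key of each batch back in as the next startKey, and stop when a batch comes back short. A sketch under the same assumptions (keyManager and rootDirArgs as in the test):

// Page through results; a batch shorter than pageSize ends the scan.
final int pageSize = 5;
String cursor = "";
TreeSet<String> seen = new TreeSet<>();
List<OzoneFileStatus> page;
do {
  page = keyManager.listStatus(rootDirArgs, true, cursor, pageSize);
  for (OzoneFileStatus status : page) {
    cursor = status.getKeyInfo().getKeyName();
    seen.add(cursor);  // the set absorbs the repeated cursor entry
  }
} while (page.size() == pageSize);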