Example usage of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in the Apache Ozone project: class TestReconWithOzoneManager, method testOmDBSyncing.
// Verifies that Recon tracks the OM DB through three phases:
// (1) an initial full snapshot, (2) delta updates, and (3) delta-only
// catch-up after a Recon restart (no fresh full snapshot is taken).
@Test
public void testOmDBSyncing() throws Exception {
// add a vol, bucket and key
addKeys(0, 1);
// check if OM metadata has vol0/bucket0/key0 info
String ozoneKey = metadataManager.getOzoneKey("vol0", "bucket0", "key0");
OmKeyInfo keyInfo1 = metadataManager.getKeyTable(getBucketLayout()).get(ozoneKey);
// NOTE(review): these TableIterators are never visibly closed in this
// method; confirm getTableKeyCount() closes them, else they leak.
TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> omKeyValueTableIterator = metadataManager.getKeyTable(getBucketLayout()).iterator();
long omMetadataKeyCount = getTableKeyCount(omKeyValueTableIterator);
// verify if OM has /vol0/bucket0/key0
Assert.assertEquals("vol0", keyInfo1.getVolumeName());
Assert.assertEquals("bucket0", keyInfo1.getBucketName());
// Trigger the first OM -> Recon sync (full snapshot path).
OzoneManagerServiceProviderImpl impl = (OzoneManagerServiceProviderImpl) cluster.getReconServer().getOzoneManagerServiceProvider();
impl.syncDataFromOM();
OzoneManagerSyncMetrics metrics = impl.getMetrics();
// HTTP call to /api/containers
String containerResponse = makeHttpCall(containerKeyServiceURL);
long reconMetadataContainerCount = getReconContainerCount(containerResponse);
// verify count of keys after full snapshot
Assert.assertEquals(omMetadataKeyCount, reconMetadataContainerCount);
// verify if Recon Metadata captures vol0/bucket0/key0 info in container0
LinkedTreeMap containerResponseMap = getContainerResponseMap(containerResponse, 0);
// Gson parses JSON numbers as Double; narrow via double -> long.
Assert.assertEquals(0, (long) (double) containerResponseMap.get("ContainerID"));
Assert.assertEquals(1, (long) (double) containerResponseMap.get("NumberOfKeys"));
// HTTP call to /api/task/status
long omLatestSeqNumber = ((RDBStore) metadataManager.getStore()).getDb().getLatestSequenceNumber();
String taskStatusResponse = makeHttpCall(taskStatusURL);
long reconLatestSeqNumber = getReconTaskAttributeFromJson(taskStatusResponse, OmSnapshotRequest.name(), "lastUpdatedSeqNumber");
// verify sequence number after full snapshot
Assert.assertEquals(omLatestSeqNumber, reconLatestSeqNumber);
Assert.assertEquals(0, metrics.getSequenceNumberLag().value());
// add 4 keys to check for delta updates
addKeys(1, 5);
omKeyValueTableIterator = metadataManager.getKeyTable(getBucketLayout()).iterator();
omMetadataKeyCount = getTableKeyCount(omKeyValueTableIterator);
// update the next snapshot from om to verify delta updates
impl.syncDataFromOM();
// HTTP call to /api/containers
containerResponse = makeHttpCall(containerKeyServiceURL);
reconMetadataContainerCount = getReconContainerCount(containerResponse);
// verify count of keys
Assert.assertEquals(omMetadataKeyCount, reconMetadataContainerCount);
// verify if Recon Metadata captures vol3/bucket3/key3 info in container3
containerResponseMap = getContainerResponseMap(containerResponse, 3);
Assert.assertEquals(3, (long) (double) containerResponseMap.get("ContainerID"));
Assert.assertEquals(1, (long) (double) containerResponseMap.get("NumberOfKeys"));
// HTTP call to /api/task/status
omLatestSeqNumber = ((RDBStore) metadataManager.getStore()).getDb().getLatestSequenceNumber();
taskStatusResponse = makeHttpCall(taskStatusURL);
reconLatestSeqNumber = getReconTaskAttributeFromJson(taskStatusResponse, OmDeltaRequest.name(), "lastUpdatedSeqNumber");
// verify sequence number after Delta Updates
Assert.assertEquals(omLatestSeqNumber, reconLatestSeqNumber);
Assert.assertEquals(0, metrics.getSequenceNumberLag().value());
// Remember when the last full snapshot ran; used below to prove the
// post-restart sync did NOT take another full snapshot.
long beforeRestartSnapShotTimeStamp = getReconTaskAttributeFromJson(taskStatusResponse, OmSnapshotRequest.name(), "lastUpdatedTimestamp");
// restart Recon
cluster.restartReconServer();
impl = (OzoneManagerServiceProviderImpl) cluster.getReconServer().getOzoneManagerServiceProvider();
// add 5 more keys to OM
addKeys(5, 10);
omKeyValueTableIterator = metadataManager.getKeyTable(getBucketLayout()).iterator();
omMetadataKeyCount = getTableKeyCount(omKeyValueTableIterator);
// get the next snapshot from om
impl.syncDataFromOM();
// HTTP call to /api/containers
containerResponse = makeHttpCall(containerKeyServiceURL);
reconMetadataContainerCount = getReconContainerCount(containerResponse);
// verify count of keys
Assert.assertEquals(omMetadataKeyCount, reconMetadataContainerCount);
// verify if Recon Metadata captures vol7/bucket7/key7 info in container7
containerResponseMap = getContainerResponseMap(containerResponse, 7);
Assert.assertEquals(7, (long) (double) containerResponseMap.get("ContainerID"));
Assert.assertEquals(1, (long) (double) containerResponseMap.get("NumberOfKeys"));
// HTTP call to /api/task/status
omLatestSeqNumber = ((RDBStore) metadataManager.getStore()).getDb().getLatestSequenceNumber();
taskStatusResponse = makeHttpCall(taskStatusURL);
reconLatestSeqNumber = getReconTaskAttributeFromJson(taskStatusResponse, OmDeltaRequest.name(), "lastUpdatedSeqNumber");
long afterRestartSnapShotTimeStamp = getReconTaskAttributeFromJson(taskStatusResponse, OmSnapshotRequest.name(), "lastUpdatedTimestamp");
// verify only Delta updates were added to recon after restart.
Assert.assertEquals(beforeRestartSnapShotTimeStamp, afterRestartSnapShotTimeStamp);
// verify sequence number after Delta Updates
Assert.assertEquals(omLatestSeqNumber, reconLatestSeqNumber);
Assert.assertEquals(0, metrics.getSequenceNumberLag().value());
}
Example usage of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in the Apache Ozone project: class TestStorageContainerManagerHA, method testPutKey.
/**
 * End-to-end write/read check against an HA SCM cluster: writes one key,
 * reads it back and verifies content and timestamps, then confirms the
 * container holding the key exists on every SCM replica once the
 * followers have caught up with the Ratis leader.
 *
 * @throws Exception on any cluster, client, or assertion failure.
 */
public void testPutKey() throws Exception {
  String volumeName = UUID.randomUUID().toString();
  String bucketName = UUID.randomUUID().toString();
  Instant testStartTime = Instant.now();
  ObjectStore store = OzoneClientFactory.getRpcClient(cluster.getConf()).getObjectStore();
  String value = "sample value";
  store.createVolume(volumeName);
  OzoneVolume volume = store.getVolume(volumeName);
  volume.createBucket(bucketName);
  OzoneBucket bucket = volume.getBucket(bucketName);
  String keyName = UUID.randomUUID().toString();
  OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>());
  out.write(value.getBytes(UTF_8));
  out.close();
  OzoneKey key = bucket.getKey(keyName);
  Assert.assertEquals(keyName, key.getName());
  OzoneInputStream is = bucket.readKey(keyName);
  byte[] fileContent = new byte[value.getBytes(UTF_8).length];
  // BUGFIX: InputStream.read(byte[]) may return fewer bytes than requested;
  // the return value was previously ignored, which could make the content
  // assertion below fail spuriously. Read until the buffer is full.
  int off = 0;
  while (off < fileContent.length) {
    int n = is.read(fileContent, off, fileContent.length - off);
    Assert.assertTrue("Unexpected end of stream while reading key", n > 0);
    off += n;
  }
  Assert.assertEquals(value, new String(fileContent, UTF_8));
  // Creation/modification times must not predate the test start.
  Assert.assertFalse(key.getCreationTime().isBefore(testStartTime));
  Assert.assertFalse(key.getModificationTime().isBefore(testStartTime));
  is.close();
  // Look up the key's block locations via the OM.
  final OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setReplicationConfig(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE)).setKeyName(keyName).setRefreshPipeline(true).build();
  final OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs);
  final List<OmKeyLocationInfo> keyLocationInfos = keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly();
  // Find the current Ratis leader's last applied index.
  long index = -1;
  for (StorageContainerManager scm : cluster.getStorageContainerManagers()) {
    if (scm.checkLeader()) {
      index = getLastAppliedIndex(scm);
    }
  }
  Assert.assertFalse(index == -1);
  long finalIndex = index;
  // Ensure all follower scms have caught up with the leader
  GenericTestUtils.waitFor(() -> areAllScmInSync(finalIndex), 100, 10000);
  final long containerID = keyLocationInfos.get(0).getContainerID();
  for (int k = 0; k < numOfSCMs; k++) {
    StorageContainerManager scm = cluster.getStorageContainerManagers().get(k);
    // flush to DB on each SCM
    ((SCMRatisServerImpl) scm.getScmHAManager().getRatisServer()).getStateMachine().takeSnapshot();
    Assert.assertTrue(scm.getContainerManager().containerExist(ContainerID.valueOf(containerID)));
    Assert.assertNotNull(scm.getScmMetadataStore().getContainerTable().get(ContainerID.valueOf(containerID)));
  }
}
Example usage of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in the Apache Ozone project: class TestScmSafeMode, method testSCMSafeMode.
// Verifies SCM safe-mode behavior in two scenarios: an empty cluster
// (no containers) and a cluster restarted with existing closed containers,
// where safe mode should only be exited once datanodes report back.
@Test(timeout = 300_000)
public void testSCMSafeMode() throws Exception {
// Test1: Test safe mode when there are no containers in system.
cluster.stop();
try {
cluster = builder.build();
} catch (IOException e) {
Assert.fail("Cluster startup failed.");
}
assertTrue(cluster.getStorageContainerManager().isInSafeMode());
cluster.startHddsDatanodes();
cluster.waitForClusterToBeReady();
cluster.waitTobeOutOfSafeMode();
assertFalse(cluster.getStorageContainerManager().isInSafeMode());
// Test2: Test safe mode when containers are there in system.
// Create {numKeys} random names keys.
TestStorageContainerManagerHelper helper = new TestStorageContainerManagerHelper(cluster, conf);
// keyLocations itself is unused; createKeys() is called for its side
// effect of populating containers in the cluster.
Map<String, OmKeyInfo> keyLocations = helper.createKeys(100 * 2, 4096);
final List<ContainerInfo> containers = cluster.getStorageContainerManager().getContainerManager().getContainers();
// NOTE(review): this waitFor polls the list captured above — confirm
// getContainers() returns a live view, otherwise the condition never
// changes across polls.
GenericTestUtils.waitFor(() -> containers.size() >= 3, 100, 1000 * 30);
// Removing some container to keep them open.
containers.remove(0);
containers.remove(0);
// Close remaining containers
ContainerManager mapping = cluster.getStorageContainerManager().getContainerManager();
containers.forEach(c -> {
try {
mapping.updateContainerState(c.containerID(), HddsProtos.LifeCycleEvent.FINALIZE);
mapping.updateContainerState(c.containerID(), LifeCycleEvent.CLOSE);
} catch (IOException | InvalidStateTransitionException e) {
// Best-effort: a failed transition is logged, not fatal to the test.
LOG.info("Failed to change state of open containers.", e);
}
});
cluster.stop();
// Capture SCM safe-mode log output across the restart so we can assert
// on the exact exit message below.
GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer.captureLogs(SCMSafeModeManager.getLogger());
logCapturer.clearOutput();
try {
cluster = builder.build();
} catch (IOException ex) {
fail("failed");
}
StorageContainerManager scm;
scm = cluster.getStorageContainerManager();
assertTrue(scm.isInSafeMode());
assertFalse(logCapturer.getOutput().contains("SCM exiting safe mode."));
// Exact-zero double comparison: no datanode has reported yet, so the
// container threshold is expected to be exactly 0.
assertTrue(scm.getCurrentContainerThreshold() == 0);
for (HddsDatanodeService dn : cluster.getHddsDatanodes()) {
dn.start();
}
GenericTestUtils.waitFor(() -> scm.getCurrentContainerThreshold() == 1.0, 100, 20000);
// Drain pending events so the safe-mode exit decision is processed.
EventQueue eventQueue = (EventQueue) cluster.getStorageContainerManager().getEventQueue();
eventQueue.processAll(5000L);
double safeModeCutoff = conf.getDouble(HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT, HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT);
assertTrue(scm.getCurrentContainerThreshold() >= safeModeCutoff);
assertTrue(logCapturer.getOutput().contains("SCM exiting safe mode."));
assertFalse(scm.isInSafeMode());
}
Example usage of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in the Apache Ozone project: class TestScmSafeMode, method testSafeModeOperations.
// Verifies that client write operations (block allocation) are rejected
// while the SCM is in safe mode after a restart without datanodes.
@Test(timeout = 300_000)
public void testSafeModeOperations() throws Exception {
// Create {numKeys} random names keys.
TestStorageContainerManagerHelper helper = new TestStorageContainerManagerHelper(cluster, conf);
// keyLocations is unused; createKeys() is invoked for its side effect of
// creating keys (and thus containers) in the cluster.
Map<String, OmKeyInfo> keyLocations = helper.createKeys(100, 4096);
final List<ContainerInfo> containers = cluster.getStorageContainerManager().getContainerManager().getContainers();
GenericTestUtils.waitFor(() -> containers.size() >= 3, 100, 1000);
String volumeName = "volume" + RandomStringUtils.randomNumeric(5);
String bucketName = "bucket" + RandomStringUtils.randomNumeric(5);
String keyName = "key" + RandomStringUtils.randomNumeric(5);
ObjectStore store = cluster.getRpcClient().getObjectStore();
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);
// NOTE(review): the OzoneOutputStream returned here is never closed, so
// the key is left open/uncommitted before the cluster restart — confirm
// this is intentional for the safe-mode scenario.
bucket.createKey(keyName, 1000, RATIS, ONE, new HashMap<>());
cluster.stop();
try {
cluster = builder.build();
} catch (IOException e) {
fail("failed");
}
StorageContainerManager scm;
scm = cluster.getStorageContainerManager();
Assert.assertTrue(scm.isInSafeMode());
om = cluster.getOzoneManager();
final OzoneBucket bucket1 = cluster.getRpcClient().getObjectStore().getVolume(volumeName).getBucket(bucketName);
// As cluster is restarted with out datanodes restart
// Block allocation must fail the safe-mode precheck.
LambdaTestUtils.intercept(IOException.class, "SafeModePrecheck failed for allocateBlock", () -> bucket1.createKey(keyName, 1000, RATIS, ONE, new HashMap<>()));
}
Example usage of org.apache.hadoop.ozone.om.helpers.OmKeyInfo in the Apache Ozone project: class ChunkKeyHandler, method execute.
/**
 * Resolves the chunk files backing a key. The OM supplies the key's block
 * locations (containerID/localID); every datanode in each block's pipeline
 * is then asked for its chunk list via getBlock, and the aggregated result
 * is printed as pretty-printed JSON.
 *
 * @param client  Ozone client used to reach the OzoneManager.
 * @param address key address; must resolve to volume/bucket/key.
 * @throws IOException on RPC or container-read failures.
 * @throws OzoneClientException if the address is not a valid key address.
 */
@Override
protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException {
  containerOperationClient = new ContainerOperationClient(createOzoneConfiguration());
  xceiverClientManager = containerOperationClient.getXceiverClientManager();
  ozoneManagerClient = client.getObjectStore().getClientProxy().getOzoneManagerClient();
  address.ensureKeyAddress();
  JsonElement element;
  JsonObject result = new JsonObject();
  String volumeName = address.getVolumeName();
  String bucketName = address.getBucketName();
  String keyName = address.getKeyName();
  List<ContainerProtos.ChunkInfo> tempchunks = null;
  List<ChunkDetails> chunkDetailsList = new ArrayList<ChunkDetails>();
  HashSet<String> chunkPaths = new HashSet<>();
  OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setRefreshPipeline(true).build();
  OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
  // querying the keyLocations. The OM is queried to get containerID and
  // localID pertaining to a given key
  List<OmKeyLocationInfo> locationInfos = keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
  // for zero-sized key
  if (locationInfos.isEmpty()) {
    System.out.println("No Key Locations Found");
    return;
  }
  ContainerLayoutVersion containerLayoutVersion = ContainerLayoutVersion.getConfiguredVersion(getConf());
  JsonArray responseArrayList = new JsonArray();
  for (OmKeyLocationInfo keyLocation : locationInfos) {
    ContainerChunkInfo containerChunkInfoVerbose = new ContainerChunkInfo();
    ContainerChunkInfo containerChunkInfo = new ContainerChunkInfo();
    long containerId = keyLocation.getContainerID();
    chunkPaths.clear();
    Pipeline pipeline = keyLocation.getPipeline();
    // Chunk reads go over STANDALONE; rewrite the pipeline if needed.
    if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
      pipeline = Pipeline.newBuilder(pipeline).setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)).build();
    }
    xceiverClient = xceiverClientManager.acquireClientForReadData(pipeline);
    try {
      // Datanode is queried to get chunk information. Thus querying the
      // OM, SCM and datanode helps us get chunk location information.
      ContainerProtos.DatanodeBlockID datanodeBlockID = keyLocation.getBlockID().getDatanodeBlockIDProtobuf();
      // doing a getBlock on all nodes
      HashMap<DatanodeDetails, ContainerProtos.GetBlockResponseProto> responses;
      try {
        responses = ContainerProtocolCalls.getBlockFromAllNodes(xceiverClient, datanodeBlockID, keyLocation.getToken());
      } catch (InterruptedException e) {
        LOG.error("Execution interrupted due to " + e);
        // BUGFIX: restore the interrupt flag and stop. Previously this
        // fell through with 'responses' still null and NPE'd on
        // responses.entrySet() below.
        Thread.currentThread().interrupt();
        return;
      }
      JsonArray responseFromAllNodes = new JsonArray();
      for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto> entry : responses.entrySet()) {
        JsonObject jsonObj = new JsonObject();
        if (entry.getValue() == null) {
          LOG.error("Cant execute getBlock on this node");
          continue;
        }
        tempchunks = entry.getValue().getBlockData().getChunksList();
        ContainerProtos.ContainerDataProto containerData = containerOperationClient.readContainer(keyLocation.getContainerID(), keyLocation.getPipeline());
        // Map each reported chunk to its on-disk file path.
        for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) {
          String fileName = containerLayoutVersion.getChunkFile(new File(getChunkLocationPath(containerData.getContainerPath())), keyLocation.getBlockID(), ChunkInfo.getFromProtoBuf(chunkInfo)).toString();
          chunkPaths.add(fileName);
          ChunkDetails chunkDetails = new ChunkDetails();
          chunkDetails.setChunkName(fileName);
          chunkDetails.setChunkOffset(chunkInfo.getOffset());
          chunkDetailsList.add(chunkDetails);
        }
        // NOTE(review): chunkDetailsList accumulates across datanodes and
        // key locations (it is never cleared); confirm the verbose output
        // is intended to be cumulative.
        containerChunkInfoVerbose.setContainerPath(containerData.getContainerPath());
        containerChunkInfoVerbose.setPipeline(keyLocation.getPipeline());
        containerChunkInfoVerbose.setChunkInfos(chunkDetailsList);
        containerChunkInfo.setFiles(chunkPaths);
        containerChunkInfo.setPipelineID(keyLocation.getPipeline().getId().getId());
        Gson gson = new GsonBuilder().create();
        if (isVerbose()) {
          element = gson.toJsonTree(containerChunkInfoVerbose);
        } else {
          element = gson.toJsonTree(containerChunkInfo);
        }
        jsonObj.addProperty("Datanode-HostName", entry.getKey().getHostName());
        jsonObj.addProperty("Datanode-IP", entry.getKey().getIpAddress());
        jsonObj.addProperty("Container-ID", containerId);
        jsonObj.addProperty("Block-ID", keyLocation.getLocalID());
        jsonObj.add("Locations", element);
        responseFromAllNodes.add(jsonObj);
      }
      responseArrayList.add(responseFromAllNodes);
    } finally {
      // BUGFIX: release the xceiver client exactly once per acquire.
      // Previously the release call sat inside the per-datanode loop,
      // releasing a single acquisition once per entry.
      xceiverClientManager.releaseClientForReadData(xceiverClient, false);
    }
  }
  result.add("KeyLocations", responseArrayList);
  Gson gson2 = new GsonBuilder().setPrettyPrinting().create();
  String prettyJson = gson2.toJson(result);
  System.out.println(prettyJson);
}
Aggregations