Use of org.apache.hadoop.hdds.scm.cli.ContainerOperationClient in project Ozone by Apache.
The class ChunkKeyHandler, method execute.
@Override
protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException {
  containerOperationClient = new ContainerOperationClient(createOzoneConfiguration());
  xceiverClientManager = containerOperationClient.getXceiverClientManager();
  ozoneManagerClient = client.getObjectStore().getClientProxy().getOzoneManagerClient();
  address.ensureKeyAddress();
  JsonElement element;
  JsonObject result = new JsonObject();
  String volumeName = address.getVolumeName();
  String bucketName = address.getBucketName();
  String keyName = address.getKeyName();
  List<ContainerProtos.ChunkInfo> tempchunks = null;
  List<ChunkDetails> chunkDetailsList = new ArrayList<ChunkDetails>();
  HashSet<String> chunkPaths = new HashSet<>();
  OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setRefreshPipeline(true).build();
  OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
  // Querying the key locations: the OM is queried to get the containerID and
  // localID pertaining to the given key.
  List<OmKeyLocationInfo> locationInfos = keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
  // for a zero-sized key
  if (locationInfos.isEmpty()) {
    System.out.println("No Key Locations Found");
    return;
  }
  ContainerLayoutVersion containerLayoutVersion = ContainerLayoutVersion.getConfiguredVersion(getConf());
  JsonArray responseArrayList = new JsonArray();
  for (OmKeyLocationInfo keyLocation : locationInfos) {
    ContainerChunkInfo containerChunkInfoVerbose = new ContainerChunkInfo();
    ContainerChunkInfo containerChunkInfo = new ContainerChunkInfo();
    long containerId = keyLocation.getContainerID();
    chunkPaths.clear();
    Pipeline pipeline = keyLocation.getPipeline();
    if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
      pipeline = Pipeline.newBuilder(pipeline).setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)).build();
    }
    xceiverClient = xceiverClientManager.acquireClientForReadData(pipeline);
    // The datanode is queried to get chunk information. Thus querying the
    // OM, SCM and datanode gives us the chunk location information.
    ContainerProtos.DatanodeBlockID datanodeBlockID = keyLocation.getBlockID().getDatanodeBlockIDProtobuf();
    // doing a getBlock on all nodes
    HashMap<DatanodeDetails, ContainerProtos.GetBlockResponseProto> responses = null;
    try {
      responses = ContainerProtocolCalls.getBlockFromAllNodes(xceiverClient, datanodeBlockID, keyLocation.getToken());
    } catch (InterruptedException e) {
      LOG.error("Execution interrupted due to " + e);
      Thread.currentThread().interrupt();
    }
    JsonArray responseFromAllNodes = new JsonArray();
    for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto> entry : responses.entrySet()) {
      JsonObject jsonObj = new JsonObject();
      if (entry.getValue() == null) {
        LOG.error("Can't execute getBlock on this node");
        continue;
      }
      tempchunks = entry.getValue().getBlockData().getChunksList();
      ContainerProtos.ContainerDataProto containerData = containerOperationClient.readContainer(keyLocation.getContainerID(), keyLocation.getPipeline());
      for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) {
        String fileName = containerLayoutVersion.getChunkFile(new File(getChunkLocationPath(containerData.getContainerPath())), keyLocation.getBlockID(), ChunkInfo.getFromProtoBuf(chunkInfo)).toString();
        chunkPaths.add(fileName);
        ChunkDetails chunkDetails = new ChunkDetails();
        chunkDetails.setChunkName(fileName);
        chunkDetails.setChunkOffset(chunkInfo.getOffset());
        chunkDetailsList.add(chunkDetails);
      }
      containerChunkInfoVerbose.setContainerPath(containerData.getContainerPath());
      containerChunkInfoVerbose.setPipeline(keyLocation.getPipeline());
      containerChunkInfoVerbose.setChunkInfos(chunkDetailsList);
      containerChunkInfo.setFiles(chunkPaths);
      containerChunkInfo.setPipelineID(keyLocation.getPipeline().getId().getId());
      Gson gson = new GsonBuilder().create();
      if (isVerbose()) {
        element = gson.toJsonTree(containerChunkInfoVerbose);
      } else {
        element = gson.toJsonTree(containerChunkInfo);
      }
      jsonObj.addProperty("Datanode-HostName", entry.getKey().getHostName());
      jsonObj.addProperty("Datanode-IP", entry.getKey().getIpAddress());
      jsonObj.addProperty("Container-ID", containerId);
      jsonObj.addProperty("Block-ID", keyLocation.getLocalID());
      jsonObj.add("Locations", element);
      responseFromAllNodes.add(jsonObj);
      xceiverClientManager.releaseClientForReadData(xceiverClient, false);
    }
    responseArrayList.add(responseFromAllNodes);
  }
  result.add("KeyLocations", responseArrayList);
  Gson gson2 = new GsonBuilder().setPrettyPrinting().create();
  String prettyJson = gson2.toJson(result);
  System.out.println(prettyJson);
}
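The handler above fans out from the OM to the SCM and the datanodes. As a minimal illustration of the same OM-to-container lookup, here is a hedged sketch that reuses only the calls shown above; the ozoneManagerClient and containerOperationClient fields are assumed to be initialized exactly as in the handler, and the volume, bucket and key names are hypothetical.

// Sketch only: resolve a key's block locations via the OM, then read the
// container metadata over the pipeline, mirroring ChunkKeyHandler#execute.
// Checked IOExceptions propagate to the caller as they do in the handler.
OmKeyArgs args = new OmKeyArgs.Builder()
    .setVolumeName("vol1")      // hypothetical volume
    .setBucketName("bucket1")   // hypothetical bucket
    .setKeyName("key1")         // hypothetical key
    .setRefreshPipeline(true)
    .build();
OmKeyInfo info = ozoneManagerClient.lookupKey(args);
for (OmKeyLocationInfo loc : info.getLatestVersionLocations().getBlocksLatestVersionOnly()) {
  ContainerProtos.ContainerDataProto data =
      containerOperationClient.readContainer(loc.getContainerID(), loc.getPipeline());
  System.out.println("container " + loc.getContainerID() + " -> " + data.getContainerPath());
}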
Use of org.apache.hadoop.hdds.scm.cli.ContainerOperationClient in project Ozone by Apache.
The class TestContainerBalancerOperations, method setup.
@BeforeClass
public static void setup() throws Exception {
  ozoneConf = new OzoneConfiguration();
  ozoneConf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, PlacementPolicy.class);
  cluster = MiniOzoneCluster.newBuilder(ozoneConf).setNumDatanodes(3).build();
  containerBalancerClient = new ContainerOperationClient(ozoneConf);
  cluster.waitForClusterToBeReady();
}
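The snippet stops once the cluster is ready; a typical companion teardown (assumed here, not part of this excerpt) releases the MiniOzoneCluster after the tests run.

// Assumed teardown sketch: shut the in-process cluster down after all tests.
@AfterClass
public static void cleanup() {
  if (cluster != null) {
    cluster.shutdown();
  }
}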
Use of org.apache.hadoop.hdds.scm.cli.ContainerOperationClient in project Ozone by Apache.
The class TestContainerOperations, method setup.
@BeforeClass
public static void setup() throws Exception {
  ozoneConf = new OzoneConfiguration();
  ozoneConf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, PlacementPolicy.class);
  cluster = MiniOzoneCluster.newBuilder(ozoneConf).setNumDatanodes(3).build();
  storageClient = new ContainerOperationClient(ozoneConf);
  cluster.waitForClusterToBeReady();
}
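With storageClient wired to the mini cluster's SCM, a test in the spirit of this class can exercise container creation end to end. The following is a hedged sketch, not the project's exact test: the method name, owner string and assertion style are assumptions.

// Sketch only: create a standalone container through the SCM client and
// verify it can be looked up again by its containerID.
@Test
public void testCreateAndLookupContainer() throws Exception {
  ContainerWithPipeline container = storageClient.createContainer(
      HddsProtos.ReplicationType.STAND_ALONE,
      HddsProtos.ReplicationFactor.ONE, "ozone");
  Assert.assertEquals(container.getContainerInfo().getContainerID(),
      storageClient.getContainer(
          container.getContainerInfo().getContainerID()).getContainerID());
}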
Use of org.apache.hadoop.hdds.scm.cli.ContainerOperationClient in project Ozone by Apache.
The class TestDatanodeHddsVolumeFailureDetection, method init.
@Before
public void init() throws Exception {
  ozoneConfig = new OzoneConfiguration();
  ozoneConfig.set(OZONE_SCM_CONTAINER_SIZE, "1GB");
  ozoneConfig.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 0, StorageUnit.MB);
  ozoneConfig.setInt(OZONE_REPLICATION, ReplicationFactor.ONE.getValue());
  // Keep the cache size at 1 so we can trigger an IO exception when
  // reading the on-disk db instance.
  ozoneConfig.setInt(OZONE_CONTAINER_CACHE_SIZE, 1);
  // Tolerate one failed data volume and shorten the gap between successive
  // disk checks to ease testing.
  DatanodeConfiguration dnConf = ozoneConfig.getObject(DatanodeConfiguration.class);
  dnConf.setFailedDataVolumesTolerated(1);
  dnConf.setDiskCheckMinGap(Duration.ofSeconds(5));
  ozoneConfig.setFromObject(dnConf);
  cluster = MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(1).setNumDataVolumes(1).build();
  cluster.waitForClusterToBeReady();
  cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000);
  ozClient = OzoneClientFactory.getRpcClient(ozoneConfig);
  store = ozClient.getObjectStore();
  scmClient = new ContainerOperationClient(ozoneConfig);
  String volumeName = UUID.randomUUID().toString();
  store.createVolume(volumeName);
  volume = store.getVolume(volumeName);
  String bucketName = UUID.randomUUID().toString();
  volume.createBucket(bucketName);
  bucket = volume.getBucket(bucketName);
  datanodes = cluster.getHddsDatanodes();
}
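This @Before method only builds the fixture; a matching cleanup (assumed here, not part of the excerpt) would close the RPC client and shut the single-datanode cluster down.

// Assumed teardown sketch for the fixture created in init().
@After
public void shutdown() throws IOException {
  if (ozClient != null) {
    ozClient.close();
  }
  if (cluster != null) {
    cluster.shutdown();
  }
}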
Use of org.apache.hadoop.hdds.scm.cli.ContainerOperationClient in project Ozone by Apache.
The class ClosedContainerReplicator, method call.
@Override
public Void call() throws Exception {
  OzoneConfiguration conf = createOzoneConfiguration();
  final Collection<String> datanodeStorageDirs = HddsServerUtil.getDatanodeStorageDirs(conf);
  for (String dir : datanodeStorageDirs) {
    checkDestinationDirectory(dir);
  }
  // The logic is the same as the download + import path on the destination datanode.
  initializeReplicationSupervisor(conf);
  final ContainerOperationClient containerOperationClient = new ContainerOperationClient(conf);
  final List<ContainerInfo> containerInfos = containerOperationClient.listContainer(0L, 1_000_000);
  replicationTasks = new ArrayList<>();
  for (ContainerInfo container : containerInfos) {
    final ContainerWithPipeline containerWithPipeline = containerOperationClient.getContainerWithPipeline(container.getContainerID());
    if (container.getState() == LifeCycleState.CLOSED) {
      final List<DatanodeDetails> datanodesWithContainer = containerWithPipeline.getPipeline().getNodes();
      final List<String> datanodeUUIDs = datanodesWithContainer.stream().map(DatanodeDetails::getUuidString).collect(Collectors.toList());
      // If a datanode is specified, replicate the container only if that node holds a replica.
      if (datanode.isEmpty() || datanodeUUIDs.contains(datanode)) {
        replicationTasks.add(new ReplicationTask(container.getContainerID(), datanodesWithContainer));
      }
    }
  }
  // Important: override the max number of tasks.
  setTestNo(replicationTasks.size());
  init();
  timer = getMetrics().timer("replicate-container");
  runTests(this::replicateContainer);
  return null;
}
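Outside of this freon tool, the same ContainerOperationClient calls can be used on their own to inspect closed containers. The sketch below is a standalone example under stated assumptions (a reachable SCM in the supplied configuration; the method name is made up) and reuses only the listContainer and getContainerWithPipeline calls shown above.

// Sketch only: list up to 100 containers and print the datanodes backing
// each CLOSED container's pipeline.
public static void printClosedContainers(OzoneConfiguration conf) throws IOException {
  ContainerOperationClient scmClient = new ContainerOperationClient(conf);
  for (ContainerInfo info : scmClient.listContainer(0L, 100)) {
    if (info.getState() == HddsProtos.LifeCycleState.CLOSED) {
      Pipeline pipeline = scmClient.getContainerWithPipeline(info.getContainerID()).getPipeline();
      System.out.println(info.getContainerID() + " -> " + pipeline.getNodes());
    }
  }
}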