Example 1 with ContainerOperationClient

Use of org.apache.hadoop.hdds.scm.cli.ContainerOperationClient in the Apache Ozone project.

The class ChunkKeyHandler, method execute:

@Override
protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException {
    containerOperationClient = new ContainerOperationClient(createOzoneConfiguration());
    xceiverClientManager = containerOperationClient.getXceiverClientManager();
    ozoneManagerClient = client.getObjectStore().getClientProxy().getOzoneManagerClient();
    address.ensureKeyAddress();
    JsonElement element;
    JsonObject result = new JsonObject();
    String volumeName = address.getVolumeName();
    String bucketName = address.getBucketName();
    String keyName = address.getKeyName();
    List<ContainerProtos.ChunkInfo> tempchunks = null;
    List<ChunkDetails> chunkDetailsList = new ArrayList<ChunkDetails>();
    HashSet<String> chunkPaths = new HashSet<>();
    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setRefreshPipeline(true).build();
    OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
    // Query the OM for the key locations: the OM returns the containerID
    // and localID pertaining to the given key.
    List<OmKeyLocationInfo> locationInfos = keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
    // for zero-sized key
    if (locationInfos.isEmpty()) {
        System.out.println("No Key Locations Found");
        return;
    }
    ContainerLayoutVersion containerLayoutVersion = ContainerLayoutVersion.getConfiguredVersion(getConf());
    JsonArray responseArrayList = new JsonArray();
    for (OmKeyLocationInfo keyLocation : locationInfos) {
        ContainerChunkInfo containerChunkInfoVerbose = new ContainerChunkInfo();
        ContainerChunkInfo containerChunkInfo = new ContainerChunkInfo();
        long containerId = keyLocation.getContainerID();
        chunkPaths.clear();
        Pipeline pipeline = keyLocation.getPipeline();
        if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
            pipeline = Pipeline.newBuilder(pipeline).setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)).build();
        }
        xceiverClient = xceiverClientManager.acquireClientForReadData(pipeline);
        // The datanode is queried for chunk information. Querying the OM,
        // SCM and datanode together yields the chunk location information.
        ContainerProtos.DatanodeBlockID datanodeBlockID = keyLocation.getBlockID().getDatanodeBlockIDProtobuf();
        // doing a getBlock on all nodes
        HashMap<DatanodeDetails, ContainerProtos.GetBlockResponseProto> responses = null;
        try {
            responses = ContainerProtocolCalls.getBlockFromAllNodes(xceiverClient, datanodeBlockID, keyLocation.getToken());
        } catch (InterruptedException e) {
            LOG.error("Execution interrupted due to " + e);
            Thread.currentThread().interrupt();
        }
        if (responses == null) {
            // getBlock failed or was interrupted; release the client and
            // skip this key location to avoid a NullPointerException below
            xceiverClientManager.releaseClientForReadData(xceiverClient, false);
            continue;
        }
        JsonArray responseFromAllNodes = new JsonArray();
        for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto> entry : responses.entrySet()) {
            JsonObject jsonObj = new JsonObject();
            if (entry.getValue() == null) {
                LOG.error("Cant execute getBlock on this node");
                continue;
            }
            tempchunks = entry.getValue().getBlockData().getChunksList();
            ContainerProtos.ContainerDataProto containerData = containerOperationClient.readContainer(keyLocation.getContainerID(), keyLocation.getPipeline());
            for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) {
                String fileName = containerLayoutVersion.getChunkFile(new File(getChunkLocationPath(containerData.getContainerPath())), keyLocation.getBlockID(), ChunkInfo.getFromProtoBuf(chunkInfo)).toString();
                chunkPaths.add(fileName);
                ChunkDetails chunkDetails = new ChunkDetails();
                chunkDetails.setChunkName(fileName);
                chunkDetails.setChunkOffset(chunkInfo.getOffset());
                chunkDetailsList.add(chunkDetails);
            }
            containerChunkInfoVerbose.setContainerPath(containerData.getContainerPath());
            containerChunkInfoVerbose.setPipeline(keyLocation.getPipeline());
            containerChunkInfoVerbose.setChunkInfos(chunkDetailsList);
            containerChunkInfo.setFiles(chunkPaths);
            containerChunkInfo.setPipelineID(keyLocation.getPipeline().getId().getId());
            Gson gson = new GsonBuilder().create();
            if (isVerbose()) {
                element = gson.toJsonTree(containerChunkInfoVerbose);
            } else {
                element = gson.toJsonTree(containerChunkInfo);
            }
            jsonObj.addProperty("Datanode-HostName", entry.getKey().getHostName());
            jsonObj.addProperty("Datanode-IP", entry.getKey().getIpAddress());
            jsonObj.addProperty("Container-ID", containerId);
            jsonObj.addProperty("Block-ID", keyLocation.getLocalID());
            jsonObj.add("Locations", element);
            responseFromAllNodes.add(jsonObj);
        }
        // release the client once per acquire, after all datanode
        // responses for this key location have been processed
        xceiverClientManager.releaseClientForReadData(xceiverClient, false);
        responseArrayList.add(responseFromAllNodes);
    }
    result.add("KeyLocations", responseArrayList);
    Gson gson2 = new GsonBuilder().setPrettyPrinting().create();
    String prettyJson = gson2.toJson(result);
    System.out.println(prettyJson);
}
Also used: ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), ContainerLayoutVersion (org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion), ArrayList (java.util.ArrayList), JsonObject (com.google.gson.JsonObject), Gson (com.google.gson.Gson), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo), HashSet (java.util.HashSet), ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos), GsonBuilder (com.google.gson.GsonBuilder), OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs), ContainerOperationClient (org.apache.hadoop.hdds.scm.cli.ContainerOperationClient), OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), JsonArray (com.google.gson.JsonArray), JsonElement (com.google.gson.JsonElement), HashMap (java.util.HashMap), Map (java.util.Map), File (java.io.File).
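
The pattern worth lifting out of this handler is the acquire/use/release cycle on the XceiverClientManager. Below is a minimal standalone sketch of that cycle, using only calls that appear in the example; the variables om (an OzoneManagerProtocol), scm (a ContainerOperationClient) and the vol1/bucket1/key1 literals are placeholders, not names from the original code.

OmKeyArgs args = new OmKeyArgs.Builder()
    .setVolumeName("vol1")
    .setBucketName("bucket1")
    .setKeyName("key1")
    .setRefreshPipeline(true)
    .build();
// OM lookup: maps the key to its block locations
OmKeyLocationInfo loc = om.lookupKey(args)
    .getLatestVersionLocations()
    .getBlocksLatestVersionOnly()
    .get(0);
XceiverClientManager manager = scm.getXceiverClientManager();
XceiverClientSpi xceiver = manager.acquireClientForReadData(loc.getPipeline());
try {
    // datanode lookup: fetch the block metadata from every replica
    Map<DatanodeDetails, ContainerProtos.GetBlockResponseProto> replies =
        ContainerProtocolCalls.getBlockFromAllNodes(
            xceiver, loc.getBlockID().getDatanodeBlockIDProtobuf(), loc.getToken());
    for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto> e : replies.entrySet()) {
        // a null value means getBlock failed on that datanode
        int chunks = e.getValue() == null ? 0 : e.getValue().getBlockData().getChunksCount();
        System.out.println(e.getKey().getHostName() + ": " + chunks + " chunks");
    }
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
} finally {
    // pair every acquire with exactly one release
    manager.releaseClientForReadData(xceiver, false);
}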

Example 2 with ContainerOperationClient

Use of org.apache.hadoop.hdds.scm.cli.ContainerOperationClient in the Apache Ozone project.

The class TestContainerBalancerOperations, method setup:

@BeforeClass
public static void setup() throws Exception {
    ozoneConf = new OzoneConfiguration();
    ozoneConf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, PlacementPolicy.class);
    cluster = MiniOzoneCluster.newBuilder(ozoneConf).setNumDatanodes(3).build();
    containerBalancerClient = new ContainerOperationClient(ozoneConf);
    cluster.waitForClusterToBeReady();
}
Also used: OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), ContainerOperationClient (org.apache.hadoop.hdds.scm.cli.ContainerOperationClient), BeforeClass (org.junit.BeforeClass).
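
Setups like this are normally paired with a teardown that stops the mini cluster; a minimal sketch (the method name cleanup is illustrative, not from the original test):

@AfterClass
public static void cleanup() {
    // MiniOzoneCluster holds datanode and SCM processes; shut them down
    if (cluster != null) {
        cluster.shutdown();
    }
}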

Example 3 with ContainerOperationClient

Use of org.apache.hadoop.hdds.scm.cli.ContainerOperationClient in the Apache Ozone project.

The class TestContainerOperations, method setup:

@BeforeClass
public static void setup() throws Exception {
    ozoneConf = new OzoneConfiguration();
    ozoneConf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementCapacity.class, PlacementPolicy.class);
    cluster = MiniOzoneCluster.newBuilder(ozoneConf).setNumDatanodes(3).build();
    storageClient = new ContainerOperationClient(ozoneConf);
    cluster.waitForClusterToBeReady();
}
Also used: OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), ContainerOperationClient (org.apache.hadoop.hdds.scm.cli.ContainerOperationClient), BeforeClass (org.junit.BeforeClass).
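
Once setup has run, a test body can drive SCM's container RPCs through storageClient. A sketch, assuming JUnit 4 (consistent with the @BeforeClass above), the createContainer/getContainer methods of the ScmClient interface, and the OzoneConsts.OZONE owner constant; the test name is illustrative:

@Test
public void testContainerCreate() throws Exception {
    // create a standalone container and read its metadata back from SCM
    ContainerWithPipeline container = storageClient.createContainer(
        HddsProtos.ReplicationType.STAND_ALONE,
        HddsProtos.ReplicationFactor.ONE, OzoneConsts.OZONE);
    long containerId = container.getContainerInfo().getContainerID();
    Assert.assertEquals(containerId,
        storageClient.getContainer(containerId).getContainerID());
}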

Example 4 with ContainerOperationClient

Use of org.apache.hadoop.hdds.scm.cli.ContainerOperationClient in the Apache Ozone project.

The class TestDatanodeHddsVolumeFailureDetection, method init:

@Before
public void init() throws Exception {
    ozoneConfig = new OzoneConfiguration();
    ozoneConfig.set(OZONE_SCM_CONTAINER_SIZE, "1GB");
    ozoneConfig.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 0, StorageUnit.MB);
    ozoneConfig.setInt(OZONE_REPLICATION, ReplicationFactor.ONE.getValue());
    // keep the cache size at 1 so an IO exception can be triggered when
    // reading the on-disk db instance
    ozoneConfig.setInt(OZONE_CONTAINER_CACHE_SIZE, 1);
    // tolerate a single failed data volume and shorten the gap between
    // successive disk checks to speed up the test
    DatanodeConfiguration dnConf = ozoneConfig.getObject(DatanodeConfiguration.class);
    dnConf.setFailedDataVolumesTolerated(1);
    dnConf.setDiskCheckMinGap(Duration.ofSeconds(5));
    ozoneConfig.setFromObject(dnConf);
    cluster = MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(1).setNumDataVolumes(1).build();
    cluster.waitForClusterToBeReady();
    cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000);
    ozClient = OzoneClientFactory.getRpcClient(ozoneConfig);
    store = ozClient.getObjectStore();
    scmClient = new ContainerOperationClient(ozoneConfig);
    String volumeName = UUID.randomUUID().toString();
    store.createVolume(volumeName);
    volume = store.getVolume(volumeName);
    String bucketName = UUID.randomUUID().toString();
    volume.createBucket(bucketName);
    bucket = volume.getBucket(bucketName);
    datanodes = cluster.getHddsDatanodes();
}
Also used: OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), DatanodeConfiguration (org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration), ContainerOperationClient (org.apache.hadoop.hdds.scm.cli.ContainerOperationClient), Before (org.junit.Before).
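
Because init() starts a mini cluster and opens an RPC client, the matching @After should release both. A minimal sketch (the method name shutdown is illustrative; OzoneClient is Closeable):

@After
public void shutdown() throws IOException {
    // close the RPC client before tearing the cluster down
    if (ozClient != null) {
        ozClient.close();
    }
    if (cluster != null) {
        cluster.shutdown();
    }
}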

Example 5 with ContainerOperationClient

Use of org.apache.hadoop.hdds.scm.cli.ContainerOperationClient in the Apache Ozone project.

The class ClosedContainerReplicator, method call:

@Override
public Void call() throws Exception {
    OzoneConfiguration conf = createOzoneConfiguration();
    final Collection<String> datanodeStorageDirs = HddsServerUtil.getDatanodeStorageDirs(conf);
    for (String dir : datanodeStorageDirs) {
        checkDestinationDirectory(dir);
    }
    // same logic as the download-and-import path on the destination datanode
    initializeReplicationSupervisor(conf);
    final ContainerOperationClient containerOperationClient = new ContainerOperationClient(conf);
    final List<ContainerInfo> containerInfos = containerOperationClient.listContainer(0L, 1_000_000);
    replicationTasks = new ArrayList<>();
    for (ContainerInfo container : containerInfos) {
        final ContainerWithPipeline containerWithPipeline = containerOperationClient.getContainerWithPipeline(container.getContainerID());
        if (container.getState() == LifeCycleState.CLOSED) {
            final List<DatanodeDetails> datanodesWithContainer = containerWithPipeline.getPipeline().getNodes();
            final List<String> datanodeUUIDs = datanodesWithContainer.stream().map(DatanodeDetails::getUuidString).collect(Collectors.toList());
            // if a specific datanode was requested, only replicate containers
            // that have a replica on that datanode
            if (datanode.isEmpty() || datanodeUUIDs.contains(datanode)) {
                replicationTasks.add(new ReplicationTask(container.getContainerID(), datanodesWithContainer));
            }
        }
    }
    // important: override the max number of tasks.
    setTestNo(replicationTasks.size());
    init();
    timer = getMetrics().timer("replicate-container");
    runTests(this::replicateContainer);
    return null;
}
Also used: ReplicationTask (org.apache.hadoop.ozone.container.replication.ReplicationTask), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), ContainerWithPipeline (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline), ContainerOperationClient (org.apache.hadoop.hdds.scm.cli.ContainerOperationClient).
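
The call above requests up to 1,000,000 containers from listContainer in one batch. On a large cluster it can be gentler to page through the listing instead; a sketch of such a helper (listAllContainers is hypothetical, and it assumes the start ID is inclusive, consistent with the 0L start used in the example):

static List<ContainerInfo> listAllContainers(ContainerOperationClient client)
        throws IOException {
    final List<ContainerInfo> all = new ArrayList<>();
    long startId = 0;
    final int batchSize = 1000;
    List<ContainerInfo> page;
    do {
        page = client.listContainer(startId, batchSize);
        all.addAll(page);
        if (!page.isEmpty()) {
            // resume after the highest container ID seen so far
            startId = page.get(page.size() - 1).getContainerID() + 1;
        }
    } while (page.size() == batchSize);
    return all;
}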

Aggregations

ContainerOperationClient (org.apache.hadoop.hdds.scm.cli.ContainerOperationClient): 7 uses
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 5 uses
Before (org.junit.Before): 3 uses
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 2 uses
BeforeClass (org.junit.BeforeClass): 2 uses
Gson (com.google.gson.Gson): 1 use
GsonBuilder (com.google.gson.GsonBuilder): 1 use
JsonArray (com.google.gson.JsonArray): 1 use
JsonElement (com.google.gson.JsonElement): 1 use
JsonObject (com.google.gson.JsonObject): 1 use
File (java.io.File): 1 use
ArrayList (java.util.ArrayList): 1 use
HashMap (java.util.HashMap): 1 use
HashSet (java.util.HashSet): 1 use
Map (java.util.Map): 1 use
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 1 use
ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo): 1 use
ContainerWithPipeline (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline): 1 use
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 1 use
ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo): 1 use