
Example 1 with DatanodeDetailsProto

Use of org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto in project ozone by apache.

From the class TestEndpoints, method setUp:

@Before
public void setUp() throws Exception {
    // The following setup runs only once
    if (!isSetupDone) {
        initializeInjector();
        isSetupDone = true;
    }
    String datanodeId = datanodeDetails.getUuid().toString();
    String datanodeId2 = datanodeDetails2.getUuid().toString();
    containerReportsProto = ContainerReportsProto.newBuilder()
        .addReports(ContainerReplicaProto.newBuilder()
            .setContainerID(containerId)
            .setState(ContainerReplicaProto.State.OPEN)
            .setOriginNodeId(datanodeId)
            .build())
        .build();
    UUID pipelineUuid = UUID.fromString(pipelineId);
    HddsProtos.UUID uuid128 = HddsProtos.UUID.newBuilder()
        .setMostSigBits(pipelineUuid.getMostSignificantBits())
        .setLeastSigBits(pipelineUuid.getLeastSignificantBits())
        .build();
    PipelineReport pipelineReport = PipelineReport.newBuilder()
        .setPipelineID(PipelineID.newBuilder()
            .setId(pipelineId)
            .setUuid128(uuid128)
            .build())
        .setIsLeader(true)
        .build();
    PipelineReportsProto pipelineReportsProto = PipelineReportsProto.newBuilder()
        .addPipelineReport(pipelineReport)
        .build();
    DatanodeDetailsProto datanodeDetailsProto = DatanodeDetailsProto.newBuilder()
        .setHostName(HOST1)
        .setUuid(datanodeId)
        .setIpAddress(IP1)
        .build();
    extendedDatanodeDetailsProto = HddsProtos.ExtendedDatanodeDetailsProto.newBuilder()
        .setDatanodeDetails(datanodeDetailsProto)
        .setVersion("0.6.0")
        .setSetupTime(1596347628802L)
        .setBuildDate("2020-08-01T08:50Z")
        .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36")
        .build();
    StorageReportProto storageReportProto1 = StorageReportProto.newBuilder()
        .setStorageType(StorageTypeProto.DISK)
        .setStorageLocation("/disk1")
        .setScmUsed(10000)
        .setRemaining(5400)
        .setCapacity(25000)
        .setStorageUuid(UUID.randomUUID().toString())
        .setFailed(false)
        .build();
    StorageReportProto storageReportProto2 = StorageReportProto.newBuilder()
        .setStorageType(StorageTypeProto.DISK)
        .setStorageLocation("/disk2")
        .setScmUsed(25000)
        .setRemaining(10000)
        .setCapacity(50000)
        .setStorageUuid(UUID.randomUUID().toString())
        .setFailed(false)
        .build();
    NodeReportProto nodeReportProto = NodeReportProto.newBuilder()
        .addStorageReport(storageReportProto1)
        .addStorageReport(storageReportProto2)
        .build();
    DatanodeDetailsProto datanodeDetailsProto2 = DatanodeDetailsProto.newBuilder()
        .setHostName(HOST2)
        .setUuid(datanodeId2)
        .setIpAddress(IP2)
        .build();
    ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto2 = ExtendedDatanodeDetailsProto.newBuilder()
        .setDatanodeDetails(datanodeDetailsProto2)
        .setVersion("0.6.0")
        .setSetupTime(1596347636802L)
        .setBuildDate("2020-08-01T08:50Z")
        .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36")
        .build();
    StorageReportProto storageReportProto3 = StorageReportProto.newBuilder()
        .setStorageType(StorageTypeProto.DISK)
        .setStorageLocation("/disk1")
        .setScmUsed(20000)
        .setRemaining(7800)
        .setCapacity(50000)
        .setStorageUuid(UUID.randomUUID().toString())
        .setFailed(false)
        .build();
    StorageReportProto storageReportProto4 = StorageReportProto.newBuilder()
        .setStorageType(StorageTypeProto.DISK)
        .setStorageLocation("/disk2")
        .setScmUsed(60000)
        .setRemaining(10000)
        .setCapacity(80000)
        .setStorageUuid(UUID.randomUUID().toString())
        .setFailed(false)
        .build();
    NodeReportProto nodeReportProto2 = NodeReportProto.newBuilder()
        .addStorageReport(storageReportProto3)
        .addStorageReport(storageReportProto4)
        .build();
    LayoutVersionProto layoutInfo = defaultLayoutVersionProto();
    try {
        reconScm.getDatanodeProtocolServer().register(
            extendedDatanodeDetailsProto, nodeReportProto,
            containerReportsProto, pipelineReportsProto, layoutInfo);
        reconScm.getDatanodeProtocolServer().register(
            extendedDatanodeDetailsProto2, nodeReportProto2,
            ContainerReportsProto.newBuilder().build(),
            PipelineReportsProto.newBuilder().build(),
            defaultLayoutVersionProto());
        // Process all events in the event queue
        reconScm.getEventQueue().processAll(1000);
    } catch (Exception ex) {
        Assert.fail(ex.getMessage());
    }
    // Write Data to OM
    // A sample volume (sampleVol) and a bucket (bucketOne) is already created
    // in AbstractOMMetadataManagerTest.
    // Create a new volume and bucket and then write keys to the bucket.
    String volumeKey = reconOMMetadataManager.getVolumeKey("sampleVol2");
    OmVolumeArgs args = OmVolumeArgs.newBuilder()
        .setVolume("sampleVol2")
        .setAdminName("TestUser")
        .setOwnerName("TestUser")
        .build();
    reconOMMetadataManager.getVolumeTable().put(volumeKey, args);
    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
        .setVolumeName("sampleVol2")
        .setBucketName("bucketOne")
        .build();
    String bucketKey = reconOMMetadataManager.getBucketKey(
        bucketInfo.getVolumeName(), bucketInfo.getBucketName());
    reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo);
    // key = key_one
    writeDataToOm(reconOMMetadataManager, "key_one");
    // key = key_two
    writeDataToOm(reconOMMetadataManager, "key_two");
    // key = key_three
    writeDataToOm(reconOMMetadataManager, "key_three");
    // Truncate global stats table before running each test
    dslContext.truncate(GLOBAL_STATS);
}
Also used: ExtendedDatanodeDetailsProto (org.apache.hadoop.hdds.protocol.proto.HddsProtos.ExtendedDatanodeDetailsProto), OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo), PipelineReport (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport), LayoutVersionProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto), UpgradeUtils.defaultLayoutVersionProto (org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto), OmVolumeArgs (org.apache.hadoop.ozone.om.helpers.OmVolumeArgs), StorageReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto), ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString), DatanodeDetailsProto (org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto), IOException (java.io.IOException), HddsProtos (org.apache.hadoop.hdds.protocol.proto.HddsProtos), UUID (java.util.UUID), PipelineReportsProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto), NodeReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto), Before (org.junit.Before)
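
Both setUp methods in these examples build the uuid128 field by splitting a java.util.UUID into its two 64-bit halves, and Example 4 below reverses the conversion. A minimal standalone sketch of that round trip (the class name Uuid128RoundTrip is hypothetical; the builder and accessor calls are the ones these examples use):

import java.util.UUID;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

public final class Uuid128RoundTrip {

    private Uuid128RoundTrip() {
    }

    // Split a java.util.UUID into the 128-bit protobuf form passed to
    // PipelineID.Builder.setUuid128 above.
    static HddsProtos.UUID toProto(UUID uuid) {
        return HddsProtos.UUID.newBuilder()
            .setMostSigBits(uuid.getMostSignificantBits())
            .setLeastSigBits(uuid.getLeastSignificantBits())
            .build();
    }

    // Reassemble the UUID from its two halves, as Pipeline.getFromProtobuf
    // does in Example 4.
    static UUID fromProto(HddsProtos.UUID proto) {
        return new UUID(proto.getMostSigBits(), proto.getLeastSigBits());
    }

    public static void main(String[] args) {
        UUID original = UUID.randomUUID();
        UUID roundTripped = fromProto(toProto(original));
        // Both 64-bit halves survive, so the UUIDs compare equal.
        System.out.println(original.equals(roundTripped));
    }
}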

Example 2 with DatanodeDetailsProto

Use of org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto in project ozone by apache.

From the class TestOpenContainerCount, method setUp:

@Before
public void setUp() throws Exception {
    // The following setup runs only once
    if (!isSetupDone) {
        initializeInjector();
        isSetupDone = true;
    }
    datanodeId = datanodeDetails.getUuid().toString();
    // initialize container report
    builder = ContainerReportsProto.newBuilder();
    for (long i = 1L; i <= 10L; i++) {
        builder.addReports(ContainerReplicaProto.newBuilder()
            .setContainerID(i)
            .setState(ContainerReplicaProto.State.OPEN)
            .setOriginNodeId(datanodeId)
            .build());
    }
    containerReportsProto = builder.build();
    UUID pipelineUuid = UUID.fromString(pipelineId);
    HddsProtos.UUID uuid128 = HddsProtos.UUID.newBuilder()
        .setMostSigBits(pipelineUuid.getMostSignificantBits())
        .setLeastSigBits(pipelineUuid.getLeastSignificantBits())
        .build();
    UUID pipelineUuid2 = UUID.fromString(pipelineId2);
    HddsProtos.UUID uuid1282 = HddsProtos.UUID.newBuilder()
        .setMostSigBits(pipelineUuid2.getMostSignificantBits())
        .setLeastSigBits(pipelineUuid2.getLeastSignificantBits())
        .build();
    PipelineReport pipelineReport = PipelineReport.newBuilder()
        .setPipelineID(PipelineID.newBuilder()
            .setId(pipelineId)
            .setUuid128(uuid128)
            .build())
        .setIsLeader(true)
        .build();
    PipelineReport pipelineReport2 = PipelineReport.newBuilder()
        .setPipelineID(PipelineID.newBuilder()
            .setId(pipelineId2)
            .setUuid128(uuid1282)
            .build())
        .setIsLeader(false)
        .build();
    pipelineReportsProto = PipelineReportsProto.newBuilder()
        .addPipelineReport(pipelineReport)
        .addPipelineReport(pipelineReport2)
        .build();
    DatanodeDetailsProto datanodeDetailsProto = DatanodeDetailsProto.newBuilder()
        .setHostName(HOST1)
        .setUuid(datanodeId)
        .setIpAddress(IP1)
        .build();
    extendedDatanodeDetailsProto = HddsProtos.ExtendedDatanodeDetailsProto.newBuilder()
        .setDatanodeDetails(datanodeDetailsProto)
        .setVersion("0.6.0")
        .setSetupTime(1596347628802L)
        .setBuildDate("2020-08-01T08:50Z")
        .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36")
        .build();
    StorageReportProto storageReportProto1 = StorageReportProto.newBuilder()
        .setStorageType(StorageTypeProto.DISK)
        .setStorageLocation("/disk1")
        .setScmUsed(10 * OzoneConsts.GB)
        .setRemaining(90 * OzoneConsts.GB)
        .setCapacity(100 * OzoneConsts.GB)
        .setStorageUuid(UUID.randomUUID().toString())
        .setFailed(false)
        .build();
    StorageReportProto storageReportProto2 = StorageReportProto.newBuilder()
        .setStorageType(StorageTypeProto.DISK)
        .setStorageLocation("/disk2")
        .setScmUsed(10 * OzoneConsts.GB)
        .setRemaining(90 * OzoneConsts.GB)
        .setCapacity(100 * OzoneConsts.GB)
        .setStorageUuid(UUID.randomUUID().toString())
        .setFailed(false)
        .build();
    nodeReportProto = NodeReportProto.newBuilder()
        .addStorageReport(storageReportProto1)
        .addStorageReport(storageReportProto2)
        .build();
    try {
        reconScm.getDatanodeProtocolServer().register(
            extendedDatanodeDetailsProto, nodeReportProto,
            containerReportsProto, pipelineReportsProto,
            defaultLayoutVersionProto());
        // Process all events in the event queue
        reconScm.getEventQueue().processAll(1000);
    } catch (Exception ex) {
        Assert.fail(ex.getMessage());
    }
}
Also used: PipelineReport (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport), HddsProtos (org.apache.hadoop.hdds.protocol.proto.HddsProtos), StorageReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto), UUID (java.util.UUID), DatanodeDetailsProto (org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto), ExtendedDatanodeDetailsProto (org.apache.hadoop.hdds.protocol.proto.HddsProtos.ExtendedDatanodeDetailsProto), IOException (java.io.IOException), Before (org.junit.Before)
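
The loop above registers ten replicas in the OPEN state, which is what TestOpenContainerCount later counts. A small sketch of how such a count can be derived from a ContainerReportsProto (the class and method names are hypothetical; getReportsList() is the accessor protobuf generates for the repeated reports field populated via addReports):

import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;

public final class OpenContainerCounter {

    private OpenContainerCounter() {
    }

    // Count replicas reported in the OPEN state; for the report assembled
    // in the loop above this yields 10.
    static long countOpen(ContainerReportsProto reports) {
        return reports.getReportsList().stream()
            .filter(r -> r.getState() == ContainerReplicaProto.State.OPEN)
            .count();
    }
}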

Example 3 with DatanodeDetailsProto

Use of org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto in project ozone by apache.

From the class ScmTestMock, method register:

/**
 * Register Datanode.
 *
 * @param extendedDatanodeDetailsProto extended details of the registering
 *     datanode.
 * @param nodeReport node report from the datanode.
 * @param containerReportsRequestProto container reports from the datanode.
 * @param pipelineReportsProto pipeline reports from the datanode.
 * @param layoutInfo layout version of the datanode.
 * @return SCM register response.
 * @throws IOException on failure.
 */
@Override
public StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto register(
        ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto,
        NodeReportProto nodeReport,
        ContainerReportsProto containerReportsRequestProto,
        PipelineReportsProto pipelineReportsProto,
        LayoutVersionProto layoutInfo) throws IOException {
    rpcCount.incrementAndGet();
    DatanodeDetailsProto datanodeDetailsProto = extendedDatanodeDetailsProto.getDatanodeDetails();
    updateNodeReport(datanodeDetailsProto, nodeReport);
    updateContainerReport(containerReportsRequestProto, datanodeDetailsProto);
    sleepIfNeeded();
    return StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto
        .newBuilder()
        .setClusterID(UUID.randomUUID().toString())
        .setDatanodeUUID(datanodeDetailsProto.getUuid())
        .setErrorCode(StorageContainerDatanodeProtocolProtos
            .SCMRegisteredResponseProto.ErrorCode.success)
        .build();
}
Also used: DatanodeDetailsProto (org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto), ExtendedDatanodeDetailsProto (org.apache.hadoop.hdds.protocol.proto.HddsProtos.ExtendedDatanodeDetailsProto)
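
A caller-side sketch of exercising this mock: register a datanode with empty reports and check that the response echoes the datanode UUID with a success error code. The ScmTestMock import path, the wrapper class, and the registerAndVerify helper are assumptions; the register signature and builder calls come from the examples above.

import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ExtendedDatanodeDetailsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
// Import path for the mock is assumed here.
import org.apache.hadoop.ozone.container.common.ScmTestMock;

import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto;

public final class ScmTestMockUsage {

    private ScmTestMockUsage() {
    }

    static void registerAndVerify(ScmTestMock mock) throws IOException {
        String datanodeId = UUID.randomUUID().toString();
        DatanodeDetailsProto datanodeDetails = DatanodeDetailsProto.newBuilder()
            .setUuid(datanodeId)
            .setHostName("localhost")
            .setIpAddress("127.0.0.1")
            .build();
        ExtendedDatanodeDetailsProto extendedDetails =
            ExtendedDatanodeDetailsProto.newBuilder()
                .setDatanodeDetails(datanodeDetails)
                .build();
        SCMRegisteredResponseProto response = mock.register(extendedDetails,
            NodeReportProto.newBuilder().build(),
            ContainerReportsProto.newBuilder().build(),
            PipelineReportsProto.newBuilder().build(),
            defaultLayoutVersionProto());
        // The mock always answers success and echoes back the datanode UUID.
        if (response.getErrorCode() != SCMRegisteredResponseProto.ErrorCode.success
            || !datanodeId.equals(response.getDatanodeUUID())) {
            throw new IllegalStateException("Unexpected register response");
        }
    }
}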

Example 4 with DatanodeDetailsProto

Use of org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto in project ozone by apache.

From the class Pipeline, method getFromProtobuf:

public static Pipeline getFromProtobuf(HddsProtos.Pipeline pipeline) throws UnknownPipelineStateException {
    Preconditions.checkNotNull(pipeline, "Pipeline is null");
    List<DatanodeDetails> nodes = new ArrayList<>();
    for (DatanodeDetailsProto member : pipeline.getMembersList()) {
        nodes.add(DatanodeDetails.getFromProtoBuf(member));
    }
    UUID leaderId = null;
    if (pipeline.hasLeaderID128()) {
        HddsProtos.UUID uuid = pipeline.getLeaderID128();
        leaderId = new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits());
    } else if (pipeline.hasLeaderID() && StringUtils.isNotEmpty(pipeline.getLeaderID())) {
        leaderId = UUID.fromString(pipeline.getLeaderID());
    }
    UUID suggestedLeaderId = null;
    if (pipeline.hasSuggestedLeaderID()) {
        HddsProtos.UUID uuid = pipeline.getSuggestedLeaderID();
        suggestedLeaderId = new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits());
    }
    final ReplicationConfig config = ReplicationConfig.fromProtoTypeAndFactor(
        pipeline.getType(), pipeline.getFactor());
    return new Builder()
        .setId(PipelineID.getFromProtobuf(pipeline.getId()))
        .setReplicationConfig(config)
        .setState(PipelineState.fromProtobuf(pipeline.getState()))
        .setNodes(nodes)
        .setLeaderId(leaderId)
        .setSuggestedLeaderId(suggestedLeaderId)
        .setNodesInOrder(pipeline.getMemberOrdersList())
        .setCreateTimestamp(pipeline.getCreationTimeStamp())
        .build();
}
Also used: ReplicationConfig (org.apache.hadoop.hdds.client.ReplicationConfig), HddsProtos (org.apache.hadoop.hdds.protocol.proto.HddsProtos), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), HashCodeBuilder (org.apache.commons.lang3.builder.HashCodeBuilder), EqualsBuilder (org.apache.commons.lang3.builder.EqualsBuilder), ArrayList (java.util.ArrayList), DatanodeDetailsProto (org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto), UUID (java.util.UUID)
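
To see getFromProtobuf in action, here is a round-trip sketch that assembles a minimal HddsProtos.Pipeline and converts it. The builder setters mirror the generated getters used above; the enum values (RATIS, THREE, PIPELINE_OPEN) and the Pipeline import path are assumptions about HddsProtos, so treat this as a sketch rather than a verified snippet.

import java.util.UUID;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

public final class PipelineFromProtoDemo {

    private PipelineFromProtoDemo() {
    }

    public static void main(String[] args) throws Exception {
        UUID pipelineUuid = UUID.randomUUID();
        HddsProtos.UUID uuid128 = HddsProtos.UUID.newBuilder()
            .setMostSigBits(pipelineUuid.getMostSignificantBits())
            .setLeastSigBits(pipelineUuid.getLeastSignificantBits())
            .build();
        HddsProtos.DatanodeDetailsProto member =
            HddsProtos.DatanodeDetailsProto.newBuilder()
                .setUuid(UUID.randomUUID().toString())
                .setHostName("localhost")
                .setIpAddress("127.0.0.1")
                .build();
        HddsProtos.Pipeline proto = HddsProtos.Pipeline.newBuilder()
            .setId(HddsProtos.PipelineID.newBuilder()
                .setId(pipelineUuid.toString())
                .setUuid128(uuid128)
                .build())
            .setType(HddsProtos.ReplicationType.RATIS)         // assumed enum value
            .setFactor(HddsProtos.ReplicationFactor.THREE)     // assumed enum value
            .setState(HddsProtos.PipelineState.PIPELINE_OPEN)  // assumed enum value
            .addMembers(member)
            .setLeaderID128(uuid128)
            .setCreationTimeStamp(System.currentTimeMillis())
            .build();
        // The leader ID is recovered from the 128-bit field, taking the
        // first branch of the fallback logic shown above.
        Pipeline pipeline = Pipeline.getFromProtobuf(proto);
        System.out.println(pipeline);
    }
}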

Aggregations

DatanodeDetailsProto (org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto): 4 usages
UUID (java.util.UUID): 3 usages
HddsProtos (org.apache.hadoop.hdds.protocol.proto.HddsProtos): 3 usages
ExtendedDatanodeDetailsProto (org.apache.hadoop.hdds.protocol.proto.HddsProtos.ExtendedDatanodeDetailsProto): 3 usages
IOException (java.io.IOException): 2 usages
PipelineReport (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport): 2 usages
StorageReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto): 2 usages
Before (org.junit.Before): 2 usages
ArrayList (java.util.ArrayList): 1 usage
EqualsBuilder (org.apache.commons.lang3.builder.EqualsBuilder): 1 usage
HashCodeBuilder (org.apache.commons.lang3.builder.HashCodeBuilder): 1 usage
ReplicationConfig (org.apache.hadoop.hdds.client.ReplicationConfig): 1 usage
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 1 usage
LayoutVersionProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto): 1 usage
NodeReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto): 1 usage
PipelineReportsProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto): 1 usage
UpgradeUtils.defaultLayoutVersionProto (org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto): 1 usage
OmBucketInfo (org.apache.hadoop.ozone.om.helpers.OmBucketInfo): 1 usage
OmVolumeArgs (org.apache.hadoop.ozone.om.helpers.OmVolumeArgs): 1 usage
ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString): 1 usage