Example 1 with ExportedBlockKeys

Use of org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys in project hadoop by apache, from the class TestDatanodeManager, method testRemoveIncludedNode.

/**
   * Test whether removing a host from the includes list without adding it to
   * the excludes list will exclude it from data node reports.
   */
@Test
public void testRemoveIncludedNode() throws IOException {
    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
    // Pretend the write lock is held so that the DatanodeManager can start
    Mockito.when(fsn.hasWriteLock()).thenReturn(true);
    DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
    HostFileManager hm = new HostFileManager();
    HostSet noNodes = new HostSet();
    HostSet oneNode = new HostSet();
    HostSet twoNodes = new HostSet();
    DatanodeRegistration dr1 = new DatanodeRegistration(
        new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-123",
            12345, 12345, 12345, 12345),
        new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
        new ExportedBlockKeys(), "test");
    DatanodeRegistration dr2 = new DatanodeRegistration(
        new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-234",
            23456, 23456, 23456, 23456),
        new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
        new ExportedBlockKeys(), "test");
    twoNodes.add(entry("127.0.0.1:12345"));
    twoNodes.add(entry("127.0.0.1:23456"));
    oneNode.add(entry("127.0.0.1:23456"));
    hm.refresh(twoNodes, noNodes);
    Whitebox.setInternalState(dm, "hostConfigManager", hm);
    // Register two data nodes to simulate them coming up.
    // We need to add two nodes, because if we have only one node, removing it
    // will cause the includes list to be empty, which means all hosts will be
    // allowed.
    dm.registerDatanode(dr1);
    dm.registerDatanode(dr2);
    // Make sure that both nodes are reported
    List<DatanodeDescriptor> both = dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
    // Sort the list so that we know which one is which
    Collections.sort(both);
    Assert.assertEquals("Incorrect number of hosts reported", 2, both.size());
    Assert.assertEquals("Unexpected host or host in unexpected position", "127.0.0.1:12345", both.get(0).getInfoAddr());
    Assert.assertEquals("Unexpected host or host in unexpected position", "127.0.0.1:23456", both.get(1).getInfoAddr());
    // Remove one node from includes, but do not add it to excludes.
    hm.refresh(oneNode, noNodes);
    // Make sure that only one node is still reported
    List<DatanodeDescriptor> onlyOne = dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
    Assert.assertEquals("Incorrect number of hosts reported", 1, onlyOne.size());
    Assert.assertEquals("Unexpected host reported", "127.0.0.1:23456", onlyOne.get(0).getInfoAddr());
    // Remove all nodes from includes
    hm.refresh(noNodes, noNodes);
    // Check that both nodes are reported again
    List<DatanodeDescriptor> bothAgain = dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL);
    // Sort the list so that we know which one is which
    Collections.sort(bothAgain);
    Assert.assertEquals("Incorrect number of hosts reported", 2, bothAgain.size());
    Assert.assertEquals("Unexpected host or host in unexpected position", "127.0.0.1:12345", bothAgain.get(0).getInfoAddr());
    Assert.assertEquals("Unexpected host or host in unexpected position", "127.0.0.1:23456", bothAgain.get(1).getInfoAddr());
}
Also used: DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), Configuration (org.apache.hadoop.conf.Configuration), StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo), ExportedBlockKeys (org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test (org.junit.Test)
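
Note: the entry(...) helper used above is defined elsewhere in the test class and is not shown here. A minimal stand-in, assuming HostSet stores the InetSocketAddress parsed from a "host:port" string (this helper is hypothetical, for illustration only):

// Hypothetical stand-in for the entry(...) helper used above.
// Assumption: HostSet.add(...) takes an InetSocketAddress.
private static InetSocketAddress entry(String hostPort) {
    // Split at the last ':' so the host part is kept intact.
    int idx = hostPort.lastIndexOf(':');
    String host = hostPort.substring(0, idx);
    int port = Integer.parseInt(hostPort.substring(idx + 1));
    return new InetSocketAddress(host, port);
}

(Requires java.net.InetSocketAddress.)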

Example 2 with ExportedBlockKeys

Use of org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys in project hadoop by apache, from the class NamenodeProtocolServerSideTranslatorPB, method getBlockKeys.

@Override
public GetBlockKeysResponseProto getBlockKeys(RpcController unused, GetBlockKeysRequestProto request) throws ServiceException {
    ExportedBlockKeys keys;
    try {
        keys = impl.getBlockKeys();
    } catch (IOException e) {
        throw new ServiceException(e);
    }
    GetBlockKeysResponseProto.Builder builder = GetBlockKeysResponseProto.newBuilder();
    // keys can be null (e.g. when block tokens are disabled); leave the
    // optional proto field unset in that case.
    if (keys != null) {
        builder.setKeys(PBHelper.convert(keys));
    }
    return builder.build();
}
Also used: ServiceException (com.google.protobuf.ServiceException), GetBlockKeysResponseProto (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto), IOException (java.io.IOException), ExportedBlockKeys (org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys)
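
For context, the client side of this RPC applies the same conversion in reverse. A sketch of the counterpart method, modeled on NamenodeProtocolTranslatorPB from the Hadoop sources; treat the exact constant names (NULL_CONTROLLER, VOID_GET_BLOCKKEYS_REQUEST) as assumptions:

@Override
public ExportedBlockKeys getBlockKeys() throws IOException {
    try {
        GetBlockKeysResponseProto rsp =
            rpcProxy.getBlockKeys(NULL_CONTROLLER, VOID_GET_BLOCKKEYS_REQUEST);
        // The server leaves the optional keys field unset when the NameNode
        // returned null, so check hasKeys() before converting back.
        return rsp.hasKeys() ? PBHelper.convert(rsp.getKeys()) : null;
    } catch (ServiceException e) {
        throw ProtobufHelper.getRemoteException(e);
    }
}

The hasKeys() check mirrors the null check on the server side, so a Java null round-trips cleanly through the optional proto field.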

Example 3 with ExportedBlockKeys

Use of org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys in project hadoop by apache, from the class TestPBHelper, method testConvertDatanodeRegistration.

@Test
public void testConvertDatanodeRegistration() {
    DatanodeID dnId = DFSTestUtil.getLocalDatanodeID();
    BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
    ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10, getBlockKey(1), keys);
    DatanodeRegistration reg = new DatanodeRegistration(dnId, new StorageInfo(NodeType.DATA_NODE), expKeys, "3.0.0");
    DatanodeRegistrationProto proto = PBHelper.convert(reg);
    DatanodeRegistration reg2 = PBHelper.convert(proto);
    compare(reg.getStorageInfo(), reg2.getStorageInfo());
    compare(reg.getExportedKeys(), reg2.getExportedKeys());
    compare(reg, reg2);
    assertEquals(reg.getSoftwareVersion(), reg2.getSoftwareVersion());
}
Also used: DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DatanodeRegistrationProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto), StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), ExportedBlockKeys (org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys), BlockKey (org.apache.hadoop.hdfs.security.token.block.BlockKey), Test (org.junit.Test)
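
The getBlockKey(int) factory used in this test is a private TestPBHelper helper and is not shown. A plausible version, assuming BlockKey exposes the (keyId, expiryDate, encodedKey) constructor it inherits from DelegationKey:

// Hypothetical test helper: a BlockKey with a deterministic id and
// dummy key material (the constructor signature is the assumption here).
private static BlockKey getBlockKey(int keyId) {
    byte[] encodedKey = ("test-key-" + keyId).getBytes(StandardCharsets.UTF_8);
    return new BlockKey(keyId, System.currentTimeMillis() + 600_000L, encodedKey);
}

(Requires java.nio.charset.StandardCharsets.)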

Example 4 with ExportedBlockKeys

Use of org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys in project hadoop by apache, from the class TestPBHelper, method testConvertExportedBlockKeys.

@Test
public void testConvertExportedBlockKeys() {
    BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
    ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10, getBlockKey(1), keys);
    ExportedBlockKeysProto expKeysProto = PBHelper.convert(expKeys);
    ExportedBlockKeys expKeys1 = PBHelper.convert(expKeysProto);
    compare(expKeys, expKeys1);
}
Also used: ExportedBlockKeysProto (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto), ExportedBlockKeys (org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys), BlockKey (org.apache.hadoop.hdfs.security.token.block.BlockKey), Test (org.junit.Test)
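
compare(...) is likewise a private TestPBHelper helper. Assuming it asserts field-by-field equality of the two ExportedBlockKeys instances, a minimal sketch (the getter names come from ExportedBlockKeys; the helper body itself is an assumption, and the key comparisons rely on BlockKey inheriting equals() from DelegationKey):

// Hypothetical field-by-field equality check for ExportedBlockKeys.
private static void compare(ExportedBlockKeys a, ExportedBlockKeys b) {
    Assert.assertEquals(a.isBlockTokenEnabled(), b.isBlockTokenEnabled());
    Assert.assertEquals(a.getKeyUpdateInterval(), b.getKeyUpdateInterval());
    Assert.assertEquals(a.getTokenLifetime(), b.getTokenLifetime());
    Assert.assertEquals(a.getCurrentKey(), b.getCurrentKey());
    Assert.assertArrayEquals(a.getAllKeys(), b.getAllKeys());
}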

Example 5 with ExportedBlockKeys

Use of org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys in project hadoop by apache, from the class DataNode, method createBPRegistration.

/**
   * Create a DatanodeRegistration for a specific block pool.
   * @param nsInfo the namespace info from the first part of the NN handshake
   */
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
    StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
    if (storageInfo == null) {
        // storageInfo is null when running against a simulated dataset
        // (SimulatedFSDataset); synthesize one from the namespace info.
        storageInfo = new StorageInfo(
            DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
            nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(),
            NodeType.DATA_NODE);
    }
    DatanodeID dnId = new DatanodeID(
        streamingAddr.getAddress().getHostAddress(), hostName,
        storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
        infoSecurePort, getIpcPort());
    return new DatanodeRegistration(dnId, storageInfo, new ExportedBlockKeys(),
        VersionInfo.getVersion());
}
Also used: DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo), ExportedBlockKeys (org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys)
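
The no-argument ExportedBlockKeys() used here is a placeholder meaning "no keys yet": the DataNode registers with empty keys and, when block tokens are enabled, receives the real ones from the NameNode after registration. A quick illustration of what the empty placeholder reports (behavior inferred from the default constructor; verify against your Hadoop version):

ExportedBlockKeys empty = new ExportedBlockKeys();
// Empty placeholder: block tokens disabled, no key material yet.
assert !empty.isBlockTokenEnabled();
assert empty.getAllKeys().length == 0;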

Aggregations

ExportedBlockKeys (org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys): 7 uses
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 4 uses
StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo): 4 uses
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 4 uses
Test (org.junit.Test): 4 uses
BlockKey (org.apache.hadoop.hdfs.security.token.block.BlockKey): 2 uses
ServiceException (com.google.protobuf.ServiceException): 1 use
IOException (java.io.IOException): 1 use
Configuration (org.apache.hadoop.conf.Configuration): 1 use
Path (org.apache.hadoop.fs.Path): 1 use
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 1 use
DatanodeRegistrationProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto): 1 use
ExportedBlockKeysProto (org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.ExportedBlockKeysProto): 1 use
GetBlockKeysResponseProto (org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto): 1 use
BlockTokenSecretManager (org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager): 1 use
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 1 use
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 1 use
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 1 use