Search in sources :

Example 1 with StorageInfo

Use of org.apache.hadoop.hdfs.server.common.StorageInfo in the Apache Hadoop project.

From class DataNode, method initStorage.

/**
   * Initializes the {@link #data}. The initialization is done only once, when
   * handshake with the first namenode is completed.
   */
private void initStorage(final NamespaceInfo nsInfo) throws IOException {
    final FsDatasetSpi.Factory<? extends FsDatasetSpi<?>> factory = FsDatasetSpi.Factory.getFactory(getConf());
    if (!factory.isSimulated()) {
        // Only a real (non-simulated) dataset has on-disk storage directories
        // that must be recovered/transitioned before use.
        final StartupOption startOpt = getStartupOption(getConf());
        if (startOpt == null) {
            throw new IOException("Startup option not set.");
        }
        final String bpid = nsInfo.getBlockPoolID();
        //read storage info, lock data dirs and transition fs state if necessary
        synchronized (this) {
            storage.recoverTransitionRead(this, nsInfo, dataDirs, startOpt);
        }
        // Block-pool storage is available only after recoverTransitionRead above.
        final StorageInfo bpStorage = storage.getBPStorage(bpid);
        LOG.info("Setting up storage: nsid=" + bpStorage.getNamespaceID() + ";bpid=" + bpid + ";lv=" + storage.getLayoutVersion() + ";nsInfo=" + nsInfo + ";dnuuid=" + storage.getDatanodeUuid());
    }
    // If this is a newly formatted DataNode then assign a new DatanodeUuid.
    checkDatanodeUuid();
    // Create the dataset at most once; the lock guards the null-check-then-assign.
    synchronized (this) {
        if (data == null) {
            data = factory.newInstance(this, storage, getConf());
        }
    }
}
Also used : FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) StorageInfo(org.apache.hadoop.hdfs.server.common.StorageInfo) StartupOption(org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption) IOException(java.io.IOException)

Example 2 with StorageInfo

Use of org.apache.hadoop.hdfs.server.common.StorageInfo in the Apache Hadoop project.

From class DataNode, method createBPRegistration.

/**
   * Create a DatanodeRegistration for a specific block pool.
   * @param nsInfo the namespace info from the first part of the NN handshake
   */
DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
    StorageInfo bpStorage = storage.getBPStorage(nsInfo.getBlockPoolID());
    if (bpStorage == null) {
        // it's null in the case of SimulatedDataSet
        bpStorage = new StorageInfo(DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION,
                nsInfo.getNamespaceID(), nsInfo.clusterID, nsInfo.getCTime(),
                NodeType.DATA_NODE);
    }
    // Identity of this datanode, as seen by the namenode.
    final DatanodeID datanodeId = new DatanodeID(
            streamingAddr.getAddress().getHostAddress(), hostName,
            storage.getDatanodeUuid(), getXferPort(), getInfoPort(),
            infoSecurePort, getIpcPort());
    return new DatanodeRegistration(datanodeId, bpStorage,
            new ExportedBlockKeys(), VersionInfo.getVersion());
}
Also used : DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) StorageInfo(org.apache.hadoop.hdfs.server.common.StorageInfo) ExportedBlockKeys(org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys)

Example 3 with StorageInfo

Use of org.apache.hadoop.hdfs.server.common.StorageInfo in the Apache Hadoop project.

From class TestPBHelper, method testConvertDatanodeRegistration.

@Test
public void testConvertDatanodeRegistration() {
    // Build a registration with a local datanode id and a set of block keys.
    final DatanodeID localId = DFSTestUtil.getLocalDatanodeID();
    final BlockKey[] extraKeys = { getBlockKey(2), getBlockKey(3) };
    final ExportedBlockKeys exportedKeys =
            new ExportedBlockKeys(true, 9, 10, getBlockKey(1), extraKeys);
    final DatanodeRegistration original = new DatanodeRegistration(
            localId, new StorageInfo(NodeType.DATA_NODE), exportedKeys, "3.0.0");
    // Round-trip through protobuf and verify every field survives.
    final DatanodeRegistrationProto proto = PBHelper.convert(original);
    final DatanodeRegistration restored = PBHelper.convert(proto);
    compare(original.getStorageInfo(), restored.getStorageInfo());
    compare(original.getExportedKeys(), restored.getExportedKeys());
    compare(original, restored);
    assertEquals(original.getSoftwareVersion(), restored.getSoftwareVersion());
}
Also used : DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeRegistrationProto(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto) StorageInfo(org.apache.hadoop.hdfs.server.common.StorageInfo) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) ExportedBlockKeys(org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys) BlockKey(org.apache.hadoop.hdfs.security.token.block.BlockKey) Test(org.junit.Test)

Example 4 with StorageInfo

Use of org.apache.hadoop.hdfs.server.common.StorageInfo in the Apache Hadoop project.

From class TestPBHelper, method testConvertNamenodeRegistration.

@Test
public void testConvertNamenodeRegistration() {
    // Build a namenode registration, round-trip it through protobuf,
    // and verify every field survives the conversion.
    final StorageInfo info = getStorageInfo(NodeType.NAME_NODE);
    final NamenodeRegistration original = new NamenodeRegistration(
            "address:999", "http:1000", info, NamenodeRole.NAMENODE);
    final NamenodeRegistrationProto proto = PBHelper.convert(original);
    final NamenodeRegistration restored = PBHelper.convert(proto);
    assertEquals(original.getAddress(), restored.getAddress());
    assertEquals(original.getClusterID(), restored.getClusterID());
    assertEquals(original.getCTime(), restored.getCTime());
    assertEquals(original.getHttpAddress(), restored.getHttpAddress());
    assertEquals(original.getLayoutVersion(), restored.getLayoutVersion());
    assertEquals(original.getNamespaceID(), restored.getNamespaceID());
    assertEquals(original.getRegistrationID(), restored.getRegistrationID());
    assertEquals(original.getRole(), restored.getRole());
    assertEquals(original.getVersion(), restored.getVersion());
}
Also used : NamenodeRegistration(org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration) NamenodeRegistrationProto(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.NamenodeRegistrationProto) StorageInfo(org.apache.hadoop.hdfs.server.common.StorageInfo) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) Test(org.junit.Test)

Example 5 with StorageInfo

Use of org.apache.hadoop.hdfs.server.common.StorageInfo in the Apache Hadoop project.

From class TestPBHelper, method testConvertStoragInfo.

@Test
public void testConvertStoragInfo() {
    // Round-trip a namenode StorageInfo through its protobuf form and
    // verify each field is preserved.
    final StorageInfo before = getStorageInfo(NodeType.NAME_NODE);
    final StorageInfoProto proto = PBHelper.convert(before);
    final StorageInfo after = PBHelper.convert(proto, NodeType.NAME_NODE);
    assertEquals(before.getClusterID(), after.getClusterID());
    assertEquals(before.getCTime(), after.getCTime());
    assertEquals(before.getLayoutVersion(), after.getLayoutVersion());
    assertEquals(before.getNamespaceID(), after.getNamespaceID());
}
Also used : StorageInfoProto(org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.StorageInfoProto) StorageInfo(org.apache.hadoop.hdfs.server.common.StorageInfo) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) Test(org.junit.Test)

Aggregations

StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo)17 Test (org.junit.Test)11 DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration)7 DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo)6 Configuration (org.apache.hadoop.conf.Configuration)5 DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID)5 ExportedBlockKeys (org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys)4 NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols)4 File (java.io.File)3 IOException (java.io.IOException)3 Path (org.apache.hadoop.fs.Path)2 IncorrectVersionException (org.apache.hadoop.hdfs.server.common.IncorrectVersionException)2 ServiceException (com.google.protobuf.ServiceException)1 FileOutputStream (java.io.FileOutputStream)1 RandomAccessFile (java.io.RandomAccessFile)1 InetSocketAddress (java.net.InetSocketAddress)1 URL (java.net.URL)1 HAServiceState (org.apache.hadoop.ha.HAServiceProtocol.HAServiceState)1 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)1 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)1