
Example 31 with NamespaceInfo

use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.

the class TestFsDatasetImpl method testRemoveNewlyAddedVolume.

@Test(timeout = 5000)
public void testRemoveNewlyAddedVolume() throws IOException {
    final int numExistingVolumes = getNumVolumes();
    List<NamespaceInfo> nsInfos = new ArrayList<>();
    for (String bpid : BLOCK_POOL_IDS) {
        nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
    }
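    // Add one new volume backed by a mocked StorageDirectory and VolumeBuilder.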
    String newVolumePath = BASE_DIR + "/newVolumeToRemoveLater";
    StorageLocation loc = StorageLocation.parse(newVolumePath);
    Storage.StorageDirectory sd = createStorageDirectory(new File(newVolumePath));
    DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(loc), anyListOf(NamespaceInfo.class))).thenReturn(builder);
    dataset.addVolume(loc, nsInfos);
    assertEquals(numExistingVolumes + 1, getNumVolumes());
    when(storage.getNumStorageDirs()).thenReturn(numExistingVolumes + 1);
    when(storage.getStorageDir(numExistingVolumes)).thenReturn(sd);
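    // Remove the newly added volume; the volume count should drop back to the baseline.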
    Set<StorageLocation> volumesToRemove = new HashSet<>();
    volumesToRemove.add(loc);
    dataset.removeVolumes(volumesToRemove, true);
    assertEquals(numExistingVolumes, getNumVolumes());
}
Also used: DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage), StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), ArrayList(java.util.ArrayList), Matchers.anyString(org.mockito.Matchers.anyString), Storage(org.apache.hadoop.hdfs.server.common.Storage), NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo), StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation), File(java.io.File), HashSet(java.util.HashSet), Test(org.junit.Test)
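
For reference, the 4-argument NamespaceInfo constructor these tests call is (nsID, clusterID, bpID, cT). A minimal standalone sketch using that constructor and the matching getters (the values are placeholders, not taken from the tests above):

import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;

public class NamespaceInfoSketch {
    public static void main(String[] args) {
        // Same constructor shape as new NamespaceInfo(0, CLUSTER_ID, bpid, 1) above.
        NamespaceInfo nsInfo = new NamespaceInfo(0, "test-cluster", "BP-1", 1L);
        System.out.println(nsInfo.getNamespaceID());  // 0
        System.out.println(nsInfo.getClusterID());    // test-cluster
        System.out.println(nsInfo.getBlockPoolID());  // BP-1
    }
}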

Example 32 with NamespaceInfo

use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.

the class TestFsDatasetImpl method testAddVolumes.

@Test
public void testAddVolumes() throws IOException {
    final int numNewVolumes = 3;
    final int numExistingVolumes = getNumVolumes();
    final int totalVolumes = numNewVolumes + numExistingVolumes;
    Set<String> expectedVolumes = new HashSet<String>();
    List<NamespaceInfo> nsInfos = Lists.newArrayList();
    for (String bpid : BLOCK_POOL_IDS) {
        nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
    }
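    // For each new volume: stub prepareVolume() to hand back a builder for a fresh StorageDirectory, then add it.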
    for (int i = 0; i < numNewVolumes; i++) {
        String path = BASE_DIR + "/newData" + i;
        String pathUri = new Path(path).toUri().toString();
        expectedVolumes.add(new File(pathUri).getAbsolutePath());
        StorageLocation loc = StorageLocation.parse(pathUri);
        Storage.StorageDirectory sd = createStorageDirectory(new File(path));
        DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
        when(storage.prepareVolume(eq(datanode), eq(loc), anyListOf(NamespaceInfo.class))).thenReturn(builder);
        dataset.addVolume(loc, nsInfos);
        LOG.info("expectedVolumes " + i + " is " + new File(pathUri).getAbsolutePath());
    }
    assertEquals(totalVolumes, getNumVolumes());
    assertEquals(totalVolumes, dataset.storageMap.size());
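    // Read the added volumes back while holding references to them; try-with-resources releases the references.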
    Set<String> actualVolumes = new HashSet<String>();
    try (FsDatasetSpi.FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
        for (int i = 0; i < numNewVolumes; i++) {
            String volumeName = volumes.get(numExistingVolumes + i).toString();
            actualVolumes.add(volumeName);
            LOG.info("actualVolume " + i + " is " + volumeName);
        }
    }
    assertEquals(actualVolumes.size(), expectedVolumes.size());
    assertTrue(actualVolumes.containsAll(expectedVolumes));
}
Also used: Path(org.apache.hadoop.fs.Path), DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage), StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), Matchers.anyString(org.mockito.Matchers.anyString), FsVolumeReferences(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences), Storage(org.apache.hadoop.hdfs.server.common.Storage), NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo), StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation), File(java.io.File), HashSet(java.util.HashSet), Test(org.junit.Test)

Example 33 with NamespaceInfo

use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.

the class TestFsDatasetImpl method testAddVolumeFailureReleasesInUseLock.

@Test
public void testAddVolumeFailureReleasesInUseLock() throws IOException {
    FsDatasetImpl spyDataset = spy(dataset);
    FsVolumeImpl mockVolume = mock(FsVolumeImpl.class);
    File badDir = new File(BASE_DIR, "bad");
    badDir.mkdirs();
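    // Stub volume creation to return a mock volume whose getVolumeMap() always throws.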
    doReturn(mockVolume).when(spyDataset).createFsVolume(anyString(), any(StorageDirectory.class), any(StorageLocation.class));
    doThrow(new IOException("Failed to getVolumeMap()")).when(mockVolume).getVolumeMap(anyString(), any(ReplicaMap.class), any(RamDiskReplicaLruTracker.class));
    Storage.StorageDirectory sd = createStorageDirectory(badDir);
    sd.lock();
    DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(StorageLocation.parse(badDir.toURI().toString())), Matchers.<List<NamespaceInfo>>any())).thenReturn(builder);
    StorageLocation location = StorageLocation.parse(badDir.toString());
    List<NamespaceInfo> nsInfos = Lists.newArrayList();
    for (String bpid : BLOCK_POOL_IDS) {
        nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
    }
    try {
        spyDataset.addVolume(location, nsInfos);
        fail("Expect to throw MultipleIOException");
    } catch (MultipleIOException e) {
        // Expected: the mocked getVolumeMap() failure is rethrown as a MultipleIOException.
    }
    FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
}
Also used: DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage), StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), IOException(java.io.IOException), MultipleIOException(org.apache.hadoop.io.MultipleIOException), Matchers.anyString(org.mockito.Matchers.anyString), Storage(org.apache.hadoop.hdfs.server.common.Storage), StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation), NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo), File(java.io.File), Test(org.junit.Test)

Example 34 with NamespaceInfo

use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.

the class NNThroughputBenchmark method getBlockPoolId.

private void getBlockPoolId(DistributedFileSystem unused) throws IOException {
    final NamespaceInfo nsInfo = nameNodeProto.versionRequest();
    bpid = nsInfo.getBlockPoolID();
}
Also used: NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo)
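
The same handshake pattern, sketched against a DatanodeProtocol proxy for context; the proxy construction is assumed and is not part of the example above:

import java.io.IOException;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;

// Sketch: ask the NameNode for its NamespaceInfo and keep the block
// pool ID, mirroring what getBlockPoolId() above does with nameNodeProto.
static String fetchBlockPoolId(DatanodeProtocol namenode) throws IOException {
    NamespaceInfo nsInfo = namenode.versionRequest();
    return nsInfo.getBlockPoolID();
}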

Example 35 with NamespaceInfo

use of org.apache.hadoop.hdfs.server.protocol.NamespaceInfo in project hadoop by apache.

the class TestEditLog method setupEdits.

/**
   * Set up directories for tests. 
   *
   * Each rolled file is 10 txns long. 
   * A failed file is 2 txns long.
   * 
   * @param editUris directories to create edit logs in
   * @param numrolls number of times to roll the edit log during setup
   * @param closeOnFinish whether to close the edit log after setup
   * @param abortAtRolls Specifications for when to fail, see AbortSpec
   */
public static NNStorage setupEdits(List<URI> editUris, int numrolls, boolean closeOnFinish, AbortSpec... abortAtRolls) throws IOException {
    List<AbortSpec> aborts = new ArrayList<AbortSpec>(Arrays.asList(abortAtRolls));
    NNStorage storage = new NNStorage(getConf(), Collections.<URI>emptyList(), editUris);
    storage.format(new NamespaceInfo());
    FSEditLog editlog = getFSEditLog(storage);
    // open the edit log and add two transactions
    // logGenerationStamp is used, simply because it doesn't 
    // require complex arguments.
    editlog.initJournalsForWrite();
    editlog.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
    for (int i = 2; i < TXNS_PER_ROLL; i++) {
        editlog.logGenerationStamp((long) 0);
    }
    editlog.logSync();
    // Go into the edit log rolling loop. On each roll, the abortAtRolls
    // specs are checked to see if an abort is required; if so, the
    // specified journal is aborted. It will be brought back into rotation
    // automatically by rollEditLog.
    for (int i = 0; i < numrolls; i++) {
        editlog.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
        editlog.logGenerationStamp((long) i);
        editlog.logSync();
        while (aborts.size() > 0 && aborts.get(0).roll == (i + 1)) {
            AbortSpec spec = aborts.remove(0);
            editlog.getJournals().get(spec.logindex).abort();
        }
        for (int j = 3; j < TXNS_PER_ROLL; j++) {
            editlog.logGenerationStamp((long) i);
        }
        editlog.logSync();
    }
    if (closeOnFinish) {
        editlog.close();
    }
    FSImageTestUtil.logStorageContents(LOG, storage);
    return storage;
}
Also used: ArrayList(java.util.ArrayList), NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo)
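
A usage sketch for setupEdits, relying only on the signature shown above (the directory paths are placeholders, and the call site is assumed to live in TestEditLog, where the method and TXNS_PER_ROLL are defined):

import java.io.File;
import java.net.URI;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;

// Sketch: two edit directories, three rolls, close the log when done.
// With no AbortSpec arguments no journal failures are simulated, so each
// roll leaves a finalized segment of TXNS_PER_ROLL (10) transactions.
List<URI> editUris = Arrays.asList(
        new File("/tmp/edits-a").toURI(),
        new File("/tmp/edits-b").toURI());
NNStorage storage = setupEdits(editUris, 3, true);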

Aggregations

NamespaceInfo (org.apache.hadoop.hdfs.server.protocol.NamespaceInfo): 35
IOException (java.io.IOException): 13
Test (org.junit.Test): 13
File (java.io.File): 8
InetSocketAddress (java.net.InetSocketAddress): 7
Storage (org.apache.hadoop.hdfs.server.common.Storage): 6
StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory): 6
ArrayList (java.util.ArrayList): 5
DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB): 5
DataStorage (org.apache.hadoop.hdfs.server.datanode.DataStorage): 5
Configuration (org.apache.hadoop.conf.Configuration): 4
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 4
StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation): 4
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 4
NNHAStatusHeartbeat (org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat): 4
SlowPeerReports (org.apache.hadoop.hdfs.server.protocol.SlowPeerReports): 4
VolumeFailureSummary (org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary): 4
HeartbeatResponse (org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse): 3
MultipleIOException (org.apache.hadoop.io.MultipleIOException): 3
Matchers.anyString (org.mockito.Matchers.anyString): 3