Example 1 with MultipleIOException

Use of org.apache.hadoop.io.MultipleIOException in project hbase by apache.

From the class OfflineMetaRepair, the method main:

/**
   * Main program
   *
   * @param args command-line options (see printUsageAndExit)
   * @throws Exception if the repair fails unrecoverably
   */
public static void main(String[] args) throws Exception {
    // create a fsck object
    Configuration conf = HBaseConfiguration.create();
    // Cover both bases, the old way of setting default fs and the new.
    // We're supposed to run on 0.20 and 0.21 anyway.
    FSUtils.setFsDefault(conf, FSUtils.getRootDir(conf));
    HBaseFsck fsck = new HBaseFsck(conf);
    boolean fixHoles = false;
    // Process command-line args.
    for (int i = 0; i < args.length; i++) {
        String cmd = args[i];
        if (cmd.equals("-details")) {
            HBaseFsck.setDisplayFullReport();
        } else if (cmd.equals("-base")) {
            if (i == args.length - 1) {
                System.err.println("OfflineMetaRepair: -base needs an HDFS path.");
                printUsageAndExit();
            }
            // update hbase root dir to user-specified base
            i++;
            FSUtils.setRootDir(conf, new Path(args[i]));
            FSUtils.setFsDefault(conf, FSUtils.getRootDir(conf));
        } else if (cmd.equals("-sidelineDir")) {
            if (i == args.length - 1) {
                System.err.println("OfflineMetaRepair: -sidelineDir needs an HDFS path.");
                printUsageAndExit();
            }
            // set the hbck sideline dir to user-specified one
            i++;
            fsck.setSidelineDir(args[i]);
        } else if (cmd.equals("-fixHoles")) {
            fixHoles = true;
        } else if (cmd.equals("-fix")) {
            // make all fix options true
            fixHoles = true;
        } else {
            String str = "Unknown command line option: " + cmd;
            LOG.info(str);
            System.out.println(str);
            printUsageAndExit();
        }
    }
    System.out.println("OfflineMetaRepair command line options: " + StringUtils.join(args, " "));
    // Fsck doesn't shut down and doesn't provide a way to shut down its
    // threads cleanly, so we do a System.exit.
    boolean success = false;
    try {
        success = fsck.rebuildMeta(fixHoles);
    } catch (MultipleIOException mioes) {
        for (IOException ioe : mioes.getExceptions()) {
            LOG.error("Bailed out due to:", ioe);
        }
    } catch (Exception e) {
        LOG.error("Bailed out due to: ", e);
    } finally {
        System.exit(success ? 0 : 1);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Configuration(org.apache.hadoop.conf.Configuration) HBaseFsck(org.apache.hadoop.hbase.util.HBaseFsck) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException)
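
The catch block above unwraps the individual failures with getExceptions(). On the producing side, a MultipleIOException is built with the static factory MultipleIOException.createIOException(List<IOException>), which returns the sole exception when the list holds a single entry and a combined MultipleIOException otherwise. A minimal, self-contained sketch of that accumulate-then-rethrow pattern (the doWork helper and its items are hypothetical):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.io.MultipleIOException;

public class AccumulateFailures {
    // Hypothetical unit of work that can fail independently per item.
    static void doWork(String item) throws IOException {
        throw new IOException("failed on " + item);
    }

    public static void runAll(List<String> items) throws IOException {
        List<IOException> exceptions = new ArrayList<>();
        for (String item : items) {
            try {
                doWork(item);
            } catch (IOException ioe) {
                // Keep going so every failure is reported, not just the first.
                exceptions.add(ioe);
            }
        }
        if (!exceptions.isEmpty()) {
            // One entry: rethrown as-is; several: wrapped in a MultipleIOException.
            throw MultipleIOException.createIOException(exceptions);
        }
    }
}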

Example 2 with MultipleIOException

Use of org.apache.hadoop.io.MultipleIOException in project hadoop by apache.

From the class TestFsDatasetImpl, the method testAddVolumeFailureReleasesInUseLock:

@Test
public void testAddVolumeFailureReleasesInUseLock() throws IOException {
    FsDatasetImpl spyDataset = spy(dataset);
    FsVolumeImpl mockVolume = mock(FsVolumeImpl.class);
    File badDir = new File(BASE_DIR, "bad");
    badDir.mkdirs();
    doReturn(mockVolume).when(spyDataset).createFsVolume(anyString(), any(StorageDirectory.class), any(StorageLocation.class));
    doThrow(new IOException("Failed to getVolumeMap()")).when(mockVolume).getVolumeMap(anyString(), any(ReplicaMap.class), any(RamDiskReplicaLruTracker.class));
    Storage.StorageDirectory sd = createStorageDirectory(badDir);
    sd.lock();
    DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
    when(storage.prepareVolume(eq(datanode), eq(StorageLocation.parse(badDir.toURI().toString())), Matchers.<List<NamespaceInfo>>any())).thenReturn(builder);
    StorageLocation location = StorageLocation.parse(badDir.toString());
    List<NamespaceInfo> nsInfos = Lists.newArrayList();
    for (String bpid : BLOCK_POOL_IDS) {
        nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
    }
    try {
        spyDataset.addVolume(location, nsInfos);
        fail("Expect to throw MultipleIOException");
    } catch (MultipleIOException e) {
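        // Expected: addVolume collects the mocked getVolumeMap() failures and rethrows them as a MultipleIOException.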
    }
    FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
}
Also used : DataStorage(org.apache.hadoop.hdfs.server.datanode.DataStorage) StorageDirectory(org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) IOException(java.io.IOException) MultipleIOException(org.apache.hadoop.io.MultipleIOException) Matchers.anyString(org.mockito.Matchers.anyString) Storage(org.apache.hadoop.hdfs.server.common.Storage) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) NamespaceInfo(org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) File(java.io.File) Test(org.junit.Test)
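
If the test also wanted to check what the MultipleIOException carries, the empty catch block could iterate e.getExceptions(). A sketch of that variant, reusing the test's spyDataset, location, and nsInfos, and assuming the mocked "Failed to getVolumeMap()" message survives any wrapping addVolume does:

    try {
        spyDataset.addVolume(location, nsInfos);
        fail("Expect to throw MultipleIOException");
    } catch (MultipleIOException e) {
        for (IOException ioe : e.getExceptions()) {
            // Assumption: the mocked failure message is propagated unchanged.
            assertTrue(ioe.getMessage().contains("Failed to getVolumeMap()"));
        }
    }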

Aggregations

IOException (java.io.IOException) : 2
MultipleIOException (org.apache.hadoop.io.MultipleIOException) : 2
File (java.io.File) : 1
Configuration (org.apache.hadoop.conf.Configuration) : 1
Path (org.apache.hadoop.fs.Path) : 1
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration) : 1
HBaseFsck (org.apache.hadoop.hbase.util.HBaseFsck) : 1
Storage (org.apache.hadoop.hdfs.server.common.Storage) : 1
StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory) : 1
DataStorage (org.apache.hadoop.hdfs.server.datanode.DataStorage) : 1
StorageLocation (org.apache.hadoop.hdfs.server.datanode.StorageLocation) : 1
NamespaceInfo (org.apache.hadoop.hdfs.server.protocol.NamespaceInfo) : 1
Test (org.junit.Test) : 1
Matchers.anyString (org.mockito.Matchers.anyString) : 1