Use of org.apache.hadoop.io.MultipleIOException in project hbase by apache.
The class OfflineMetaRepair, method main.
/**
 * Main program
 *
 * @param args
 * @throws Exception
 */
public static void main(String[] args) throws Exception {
  // create a fsck object
  Configuration conf = HBaseConfiguration.create();
  // Cover both bases, the old way of setting default fs and the new.
  // We're supposed to run on 0.20 and 0.21 anyways.
  FSUtils.setFsDefault(conf, FSUtils.getRootDir(conf));
  HBaseFsck fsck = new HBaseFsck(conf);
  boolean fixHoles = false;
  // Process command-line args.
  for (int i = 0; i < args.length; i++) {
    String cmd = args[i];
    if (cmd.equals("-details")) {
      HBaseFsck.setDisplayFullReport();
    } else if (cmd.equals("-base")) {
      if (i == args.length - 1) {
        System.err.println("OfflineMetaRepair: -base needs an HDFS path.");
        printUsageAndExit();
      }
      // update hbase root dir to user-specified base
      i++;
      FSUtils.setRootDir(conf, new Path(args[i]));
      FSUtils.setFsDefault(conf, FSUtils.getRootDir(conf));
    } else if (cmd.equals("-sidelineDir")) {
      if (i == args.length - 1) {
        System.err.println("OfflineMetaRepair: -sidelineDir needs an HDFS path.");
        printUsageAndExit();
      }
      // set the hbck sideline dir to user-specified one
      i++;
      fsck.setSidelineDir(args[i]);
    } else if (cmd.equals("-fixHoles")) {
      fixHoles = true;
    } else if (cmd.equals("-fix")) {
      // make all fix options true
      fixHoles = true;
    } else {
      String str = "Unknown command line option : " + cmd;
      LOG.info(str);
      System.out.println(str);
      printUsageAndExit();
    }
  }
  System.out.println("OfflineMetaRepair command line options: " + StringUtils.join(args, " "));
  // Fsck doesn't shut down and doesn't provide a way to shut down its
  // threads cleanly, so we do a System.exit.
  boolean success = false;
  try {
    success = fsck.rebuildMeta(fixHoles);
  } catch (MultipleIOException mioes) {
    // Report every aggregated failure, not just the first one.
    for (IOException ioe : mioes.getExceptions()) {
      LOG.error("Bailed out due to:", ioe);
    }
  } catch (Exception e) {
    LOG.error("Bailed out due to: ", e);
  } finally {
    System.exit(success ? 0 : 1);
  }
}
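Both examples treat MultipleIOException as a container of several IOExceptions: the producer collects failures and the consumer unwraps them with getExceptions(). The sketch below shows the producer side of that contract. MultipleIOException.createIOException and getExceptions are the real Hadoop API; the class name, the processAll/process helpers, and the sample inputs are made up for illustration.

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.io.MultipleIOException;

public class MultipleIOExceptionSketch {

  // Attempt every item, remember each failure, and surface all of them at once
  // instead of stopping at the first broken item.
  static void processAll(List<String> inputs) throws IOException {
    List<IOException> errors = new ArrayList<>();
    for (String input : inputs) {
      try {
        process(input);
      } catch (IOException ioe) {
        errors.add(ioe); // keep going; report it later
      }
    }
    // createIOException returns null for an empty list, the lone exception for a
    // single-element list, and a MultipleIOException wrapping them all otherwise.
    IOException combined = MultipleIOException.createIOException(errors);
    if (combined != null) {
      throw combined;
    }
  }

  // Hypothetical per-item work that may fail.
  static void process(String input) throws IOException {
    if (input.isEmpty()) {
      throw new IOException("empty input");
    }
  }

  public static void main(String[] args) {
    try {
      processAll(Arrays.asList("a", "", ""));
    } catch (MultipleIOException mioe) {
      // Consumer side, mirroring OfflineMetaRepair.main: report every nested cause.
      for (IOException cause : mioe.getExceptions()) {
        System.err.println("failed: " + cause.getMessage());
      }
    } catch (IOException ioe) {
      System.err.println("failed: " + ioe.getMessage());
    }
  }
}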
Use of org.apache.hadoop.io.MultipleIOException in project hadoop by apache.
The class TestFsDatasetImpl, method testAddVolumeFailureReleasesInUseLock.
@Test
public void testAddVolumeFailureReleasesInUseLock() throws IOException {
  FsDatasetImpl spyDataset = spy(dataset);
  FsVolumeImpl mockVolume = mock(FsVolumeImpl.class);
  File badDir = new File(BASE_DIR, "bad");
  badDir.mkdirs();
  doReturn(mockVolume).when(spyDataset)
      .createFsVolume(anyString(), any(StorageDirectory.class), any(StorageLocation.class));
  doThrow(new IOException("Failed to getVolumeMap()")).when(mockVolume)
      .getVolumeMap(anyString(), any(ReplicaMap.class), any(RamDiskReplicaLruTracker.class));
  Storage.StorageDirectory sd = createStorageDirectory(badDir);
  sd.lock();
  DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd);
  when(storage.prepareVolume(eq(datanode), eq(StorageLocation.parse(badDir.toURI().toString())),
      Matchers.<List<NamespaceInfo>>any())).thenReturn(builder);
  StorageLocation location = StorageLocation.parse(badDir.toString());
  List<NamespaceInfo> nsInfos = Lists.newArrayList();
  for (String bpid : BLOCK_POOL_IDS) {
    nsInfos.add(new NamespaceInfo(0, CLUSTER_ID, bpid, 1));
  }
  try {
    spyDataset.addVolume(location, nsInfos);
    fail("Expect to throw MultipleIOException");
  } catch (MultipleIOException e) {
    // Expected: the injected getVolumeMap() failure surfaces as a MultipleIOException.
  }
  FsDatasetTestUtil.assertFileLockReleased(badDir.toString());
}
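The empty catch block above only asserts the exception's type. A hedged variant (not the actual HDFS test) is sketched below: it unwraps the aggregate with getExceptions() and inspects each nested cause. LOG is assumed to be the test class's logger, and the assertion only checks that the aggregate is non-empty rather than guessing how many causes FsDatasetImpl collects.

  try {
    spyDataset.addVolume(location, nsInfos);
    fail("Expect to throw MultipleIOException");
  } catch (MultipleIOException e) {
    List<IOException> causes = e.getExceptions();
    // A MultipleIOException always wraps at least one underlying IOException.
    assertFalse("Expected nested causes from the failed addVolume", causes.isEmpty());
    for (IOException cause : causes) {
      // Each nested cause stems from the stubbed getVolumeMap() failure.
      LOG.info("addVolume nested cause", cause);
    }
  }
  FsDatasetTestUtil.assertFileLockReleased(badDir.toString());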