Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.
The class TestSecurityTokenEditLog, method testEditLog.
/**
 * Tests transaction logging in dfs.
 *
 * Spins up a mini cluster, runs NUM_THREADS concurrent transaction threads
 * against the namesystem, closes the edit log, and then replays every
 * finalized edits file to verify that the expected number of transactions
 * was written without corruption.
 *
 * @throws IOException if cluster setup, edit-log I/O, or verification fails
 */
@Test
public void testEditLog() throws IOException {
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    FileSystem fileSys = null;
    try {
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
        cluster.waitActive();
        fileSys = cluster.getFileSystem();
        final FSNamesystem namesystem = cluster.getNamesystem();
        // Log the name directories for diagnostic purposes.
        for (URI nameDirUri : cluster.getNameDirs(0)) {
            System.out.println(new File(nameDirUri.getPath()));
        }
        FSImage fsimage = namesystem.getFSImage();
        FSEditLog editLog = fsimage.getEditLog();
        // set small size of flush buffer
        editLog.setOutputBufferCapacity(2048);
        // Create threads and make them run transactions concurrently.
        Thread[] threadId = new Thread[NUM_THREADS];
        for (int i = 0; i < NUM_THREADS; i++) {
            Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS);
            threadId[i] = new Thread(trans, "TransactionThread-" + i);
            threadId[i].start();
        }
        // Wait for all transactions to finish. If this thread is interrupted
        // while joining, remember the interruption and retry the join on the
        // same thread (catching InterruptedException clears the interrupt
        // status, so the retry will not immediately fail again). The status
        // is restored after all joins so callers can observe it — swallowing
        // the interrupt would hide cancellation requests.
        boolean interrupted = false;
        for (int i = 0; i < NUM_THREADS; i++) {
            try {
                threadId[i].join();
            } catch (InterruptedException e) {
                interrupted = true;
                // retry the join on the same thread
                i--;
            }
        }
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
        editLog.close();
        // Verify that we can read in all the transactions that we have written.
        // If there were any corruptions, it is likely that the reading in
        // of these transactions will throw an exception.
        //
        namesystem.getDelegationTokenSecretManager().stopThreads();
        int numKeys = namesystem.getDelegationTokenSecretManager().getNumberOfKeys();
        int expectedTransactions = NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS + numKeys + // + 2 for BEGIN and END txns
        2;
        // Replay the finalized edits file from every EDITS storage directory
        // and check the transaction count matches what was written.
        for (StorageDirectory sd : fsimage.getStorage().dirIterable(NameNodeDirType.EDITS)) {
            File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 1 + expectedTransactions - 1);
            System.out.println("Verifying file: " + editFile);
            FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);
            long numEdits = loader.loadFSEdits(new EditLogFileInputStream(editFile), 1);
            assertEquals("Verification for " + editFile, expectedTransactions, numEdits);
        }
    } finally {
        if (fileSys != null)
            fileSys.close();
        if (cluster != null)
            cluster.shutdown();
    }
}
Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.
The class TestStorageRestore, method invalidateStorage.
/**
 * invalidate storage by removing the second and third storage directories
 *
 * Collects every storage directory whose root is listed in
 * {@code filesToInvalidate}, reports it to the FSImage as errored, and then
 * installs a mock output stream on the matching file journals so that every
 * subsequent write throws an injected IOException.
 *
 * @param filesToInvalidate roots of the storage directories to fail
 * @throws IOException if reporting the directory errors fails
 */
public void invalidateStorage(FSImage fi, Set<File> filesToInvalidate) throws IOException {
    // Gather the storage directories whose roots we were asked to fail.
    ArrayList<StorageDirectory> errorDirs = new ArrayList<StorageDirectory>(2);
    for (Iterator<StorageDirectory> dirs = fi.getStorage().dirIterator(); dirs.hasNext();) {
        StorageDirectory dir = dirs.next();
        if (filesToInvalidate.contains(dir.getRoot())) {
            LOG.info("causing IO error on " + dir.getRoot());
            errorDirs.add(dir);
        }
    }
    // simulate an error
    fi.getStorage().reportErrorsOnDirectories(errorDirs);
    // Replace the current stream of each affected file journal with a spy
    // whose write() always throws, so edit-log writes to it will fail.
    for (JournalAndStream jas : fi.getEditLog().getJournals()) {
        if (!(jas.getManager() instanceof FileJournalManager)) {
            continue;
        }
        FileJournalManager journal = (FileJournalManager) jas.getManager();
        File journalRoot = journal.getStorageDirectory().getRoot();
        if (journalRoot.equals(path2) || journalRoot.equals(path3)) {
            EditLogOutputStream mockStream = spy(jas.getCurrentStream());
            jas.setCurrentStreamForTests(mockStream);
            doThrow(new IOException("Injected fault: write")).when(mockStream).write(Mockito.<FSEditLogOp>anyObject());
        }
    }
}
Use of org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory in project hadoop by apache.
The class TestHAStateTransitions, method createEmptyInProgressEditLog.
/**
 * Creates an empty in-progress edits file (for txid lastWritten + 1) in the
 * cluster's shared edits directory, optionally writing a valid layout-version
 * header into it.
 *
 * @param cluster     the mini cluster whose shared edits dir is used
 * @param nn          the NameNode whose last written txid determines the file name
 * @param writeHeader if true, write the current layout-version header
 * @throws IOException if the file cannot be created or the header written
 */
private static void createEmptyInProgressEditLog(MiniDFSCluster cluster, NameNode nn, boolean writeHeader) throws IOException {
    long txid = nn.getNamesystem().getEditLog().getLastWrittenTxId();
    URI sharedEditsUri = cluster.getSharedEditsDir(0, 1);
    File sharedEditsDir = new File(sharedEditsUri.getPath());
    StorageDirectory storageDir = new StorageDirectory(sharedEditsDir);
    File inProgressFile = NameNodeAdapter.getInProgressEditsFile(storageDir, txid + 1);
    assertTrue("Failed to create in-progress edits file", inProgressFile.createNewFile());
    if (writeHeader) {
        // try-with-resources guarantees the stream is closed even if
        // writeHeader throws; the original leaked the FileOutputStream
        // on that path.
        try (DataOutputStream out = new DataOutputStream(new FileOutputStream(inProgressFile))) {
            EditLogFileOutputStream.writeHeader(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, out);
        }
    }
}
Aggregations