Example use of org.apache.hadoop.fs.permission.PermissionStatus in the Apache Hadoop project:
class TestAzureConcurrentOutOfBandIo, method testReadOOBWrites.
@Test
public void testReadOOBWrites() throws Exception {
  byte[] dataBlockWrite = new byte[UPLOAD_BLOCK_SIZE];
  byte[] dataBlockRead = new byte[UPLOAD_BLOCK_SIZE];

  // Write five 4 MB blocks to the blob first so there is data present before
  // the reader starts. This eliminates the race between the reader and the
  // writer threads.
  OutputStream outputStream = testAccount.getStore().storefile("WASB_String.txt",
      new PermissionStatus("", "", FsPermission.getDefault()));
  try {
    Arrays.fill(dataBlockWrite, (byte) 255);
    for (int i = 0; i < NUMBER_OF_BLOCKS; i++) {
      outputStream.write(dataBlockWrite);
    }
    outputStream.flush();
  } finally {
    // Close even if a write fails, so the blob handle is not leaked.
    outputStream.close();
  }

  // Start writing blocks to Azure store using the DataBlockWriter thread.
  DataBlockWriter writeBlockTask = new DataBlockWriter(testAccount, "WASB_String.txt");
  writeBlockTask.startWriting();
  int count = 0;
  try {
    // Re-read the blob several times while the out-of-band writer is active.
    for (int i = 0; i < 5; i++) {
      DataInputStream inputStream = null;
      try {
        inputStream = testAccount.getStore().retrieve("WASB_String.txt");
        count = 0;
        int c;
        // Count every byte read until EOF (read returns -1).
        while ((c = inputStream.read(dataBlockRead, 0, UPLOAD_BLOCK_SIZE)) >= 0) {
          count += c;
        }
      } catch (IOException e) {
        // NOTE: the previous code dereferenced e.getCause(), which throws NPE
        // when the exception has no cause; report the exception itself instead.
        e.printStackTrace();
        fail("Unexpected IOException while reading blob: " + e);
      } finally {
        // Close in finally so a failed read does not leak the stream
        // (fail() throws, so a close placed after the catch would be skipped).
        if (inputStream != null) {
          inputStream.close();
        }
      }
    }
  } finally {
    // Always stop the writer thread, even if the read loop failed.
    writeBlockTask.stopWriting();
  }

  // Validate that a full set of blocks was read on the last pass.
  assertEquals(NUMBER_OF_BLOCKS * UPLOAD_BLOCK_SIZE, count);
}
Example use of org.apache.hadoop.fs.permission.PermissionStatus in the Apache Hadoop project:
class NativeAzureFileSystem, method setPermission.
/**
 * Sets the permission of the file or directory at {@code p}, applying the
 * configured umask first. An implicit directory is materialized; otherwise
 * the stored permission status is updated only when it actually changes.
 */
@Override
public void setPermission(Path p, FsPermission permission) throws FileNotFoundException, IOException {
  Path absPath = makeAbsolute(p);
  performAuthCheck(absPath.toString(), WasbAuthorizationOperations.EXECUTE.toString(), "setPermission");
  String blobKey = pathToKey(absPath);

  FileMetadata meta;
  try {
    meta = store.retrieveMetadata(blobKey);
  } catch (IOException ex) {
    // Translate an Azure "blob not found" storage error into the standard
    // FileNotFoundException; rethrow anything else untouched.
    Throwable cause = NativeAzureFileSystemHelper.checkForAzureStorageException(ex);
    if (cause instanceof StorageException
        && NativeAzureFileSystemHelper.isFileNotFoundException((StorageException) cause)) {
      throw new FileNotFoundException(String.format("File %s doesn't exists.", p));
    }
    throw ex;
  }
  if (meta == null) {
    throw new FileNotFoundException("File doesn't exist: " + p);
  }

  UMaskApplyMode applyMode = meta.isDir()
      ? UMaskApplyMode.ChangeExistingDirectory
      : UMaskApplyMode.ChangeExistingFile;
  permission = applyUMask(permission, applyMode);

  if (meta.getBlobMaterialization() == BlobMaterialization.Implicit) {
    // It's an implicit folder, need to materialize it.
    store.storeEmptyFolder(blobKey, createPermissionStatus(permission));
  } else if (!meta.getPermissionStatus().getPermission().equals(permission)) {
    PermissionStatus current = meta.getPermissionStatus();
    store.changePermissionStatus(blobKey,
        new PermissionStatus(current.getUserName(), current.getGroupName(), permission));
  }
}
Example use of org.apache.hadoop.fs.permission.PermissionStatus in the Apache Hadoop project:
class TestEditLogFileInputStream, method testScanCorruptEditLog.
/**
 * Regression test for HDFS-8965 which verifies that
 * FSEditLogFileInputStream#scanOp verifies Op checksums.
 */
@Test(timeout = 60000)
public void testScanCorruptEditLog() throws Exception {
  Configuration conf = new Configuration();
  File editLog = new File(GenericTestUtils.getTempPath("testCorruptEditLog"));
  LOG.debug("Creating test edit log file: " + editLog);

  // Write two mkdir ops (txids 1 and 2) into a fresh edit log.
  EditLogFileOutputStream elos = new EditLogFileOutputStream(conf, editLog.getAbsoluteFile(), 8192);
  elos.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
  FSEditLogOp.OpInstanceCache cache = new FSEditLogOp.OpInstanceCache();
  FSEditLogOp.MkdirOp mkdirOp = FSEditLogOp.MkdirOp.getInstance(cache);
  mkdirOp.reset();
  mkdirOp.setRpcCallId(123);
  mkdirOp.setTransactionId(1);
  mkdirOp.setInodeId(789L);
  mkdirOp.setPath("/mydir");
  PermissionStatus perms = PermissionStatus.createImmutable("myuser", "mygroup", FsPermission.createImmutable((short) 0777));
  mkdirOp.setPermissionStatus(perms);
  elos.write(mkdirOp);
  mkdirOp.reset();
  mkdirOp.setRpcCallId(456);
  mkdirOp.setTransactionId(2);
  mkdirOp.setInodeId(123L);
  mkdirOp.setPath("/mydir2");
  perms = PermissionStatus.createImmutable("myuser", "mygroup", FsPermission.createImmutable((short) 0666));
  mkdirOp.setPermissionStatus(perms);
  elos.write(mkdirOp);
  elos.setReadyToFlush();
  elos.flushAndSync(false);
  elos.close();

  // Corrupt the last 4 bytes of the file (the checksum of the second op).
  // try-with-resources so the file handle is released even if a seek/read
  // throws (the previous code leaked rwf on any intermediate exception).
  long fileLen = editLog.length();
  LOG.debug("Corrupting last 4 bytes of edit log file " + editLog + ", whose length is " + fileLen);
  try (RandomAccessFile rwf = new RandomAccessFile(editLog, "rw")) {
    rwf.seek(fileLen - 4);
    int b = rwf.readInt();
    rwf.seek(fileLen - 4);
    rwf.writeInt(b + 1);
  }

  // Scanning: txid 1 must pass, txid 2 must fail with a checksum error.
  EditLogFileInputStream elis = new EditLogFileInputStream(editLog);
  try {
    Assert.assertEquals(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION, elis.getVersion(true));
    Assert.assertEquals(1, elis.scanNextOp());
    LOG.debug("Read transaction 1 from " + editLog);
    try {
      elis.scanNextOp();
      Assert.fail("Expected scanNextOp to fail when op checksum was corrupt.");
    } catch (IOException e) {
      LOG.debug("Caught expected checksum error when reading corrupt " + "transaction 2", e);
      GenericTestUtils.assertExceptionContains("Transaction is corrupt.", e);
    }
  } finally {
    // Close in finally so a failed assertion does not leak the stream.
    elis.close();
  }
}
Example use of org.apache.hadoop.fs.permission.PermissionStatus in the Apache Hadoop project:
class TestEditLogRace, method testSaveImageWhileSyncInProgress.
/**
 * The logSync() method in FSEditLog is unsynchronized while syncing
 * so that other threads can concurrently enqueue edits while the prior
 * sync is ongoing. This test checks that the log is saved correctly
 * if the saveImage occurs while the syncing thread is in the unsynchronized middle section.
 *
 * This replicates the following manual test proposed by Konstantin:
 * I start the name-node in debugger.
 * I do -mkdir and stop the debugger in logSync() just before it does flush.
 * Then I enter safe mode with another client
 * I start saveNamespace and stop the debugger in
 * FSImage.saveFSImage() -> FSEditLog.createEditLogFile()
 * -> EditLogFileOutputStream.create() ->
 * after truncating the file but before writing LAYOUT_VERSION into it.
 * Then I let logSync() run.
 * Then I terminate the name-node.
 * After that the name-node won't start, since the edits file is broken.
 */
@Test
public void testSaveImageWhileSyncInProgress() throws Exception {
// Format a fresh namenode and load its namesystem from disk.
Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
final FSNamesystem namesystem = FSNamesystem.loadFromDisk(conf);
try {
// Replace the journal's current output stream with a Mockito spy so that
// flush() can be intercepted and delayed (see blockingFlush below).
FSImage fsimage = namesystem.getFSImage();
FSEditLog editLog = fsimage.getEditLog();
JournalAndStream jas = editLog.getJournals().get(0);
EditLogFileOutputStream spyElos = spy((EditLogFileOutputStream) jas.getCurrentStream());
jas.setCurrentStreamForTests(spyElos);
// deferredException captures any failure on the edit thread so it can be
// re-checked (asserted null) from the main thread.
final AtomicReference<Throwable> deferredException = new AtomicReference<Throwable>();
final CountDownLatch waitToEnterFlush = new CountDownLatch(1);
// Background thread that performs a single mkdirs edit; its logSync will
// hit the stubbed flush() and park inside the unsynchronized section.
final Thread doAnEditThread = new Thread() {
@Override
public void run() {
try {
LOG.info("Starting mkdirs");
namesystem.mkdirs("/test", new PermissionStatus("test", "test", new FsPermission((short) 00755)), true);
LOG.info("mkdirs complete");
} catch (Throwable ioe) {
LOG.fatal("Got exception", ioe);
deferredException.set(ioe);
// Release the main thread even on failure, so it doesn't hang on await().
waitToEnterFlush.countDown();
}
}
};
// Stubbed flush(): when called from the edit thread (or always, with the
// async edit log), signal the main thread and sleep BLOCK_TIME seconds,
// holding logSync in its unsynchronized middle section.
Answer<Void> blockingFlush = new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
LOG.info("Flush called");
if (useAsyncEditLog || Thread.currentThread() == doAnEditThread) {
LOG.info("edit thread: Telling main thread we made it to flush section...");
// Signal to main thread that the edit thread is in the racy section
waitToEnterFlush.countDown();
LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");
Thread.sleep(BLOCK_TIME * 1000);
LOG.info("Going through to flush. This will allow the main thread to continue.");
}
invocation.callRealMethod();
LOG.info("Flush complete");
return null;
}
};
doAnswer(blockingFlush).when(spyElos).flush();
doAnEditThread.start();
// Wait for the edit thread to get to the logsync unsynchronized section
LOG.info("Main thread: waiting to enter flush...");
waitToEnterFlush.await();
assertNull(deferredException.get());
LOG.info("Main thread: detected that logSync is in unsynchronized section.");
LOG.info("Trying to enter safe mode.");
LOG.info("This should block for " + BLOCK_TIME + "sec, since flush will sleep that long");
long st = Time.now();
namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
long et = Time.now();
LOG.info("Entered safe mode");
// Make sure we really waited for the flush to complete!
assertTrue(et - st > (BLOCK_TIME - 1) * 1000);
// Once we're in safe mode, save namespace: this is the racy operation
// the test exercises — saving the image while logSync is mid-flight.
namesystem.saveNamespace(0, 0);
LOG.info("Joining on edit thread...");
doAnEditThread.join();
assertNull(deferredException.get());
// We did 3 edits: begin, txn, and end
assertEquals(3, verifyEditLogs(namesystem, fsimage, NNStorage.getFinalizedEditsFileName(1, 3), 1));
// after the save, just the one "begin"
assertEquals(1, verifyEditLogs(namesystem, fsimage, NNStorage.getInProgressEditsFileName(4), 4));
} finally {
LOG.info("Closing nn");
if (namesystem != null)
namesystem.close();
}
}
Example use of org.apache.hadoop.fs.permission.PermissionStatus in the Apache Hadoop project:
class TestFSPermissionChecker, method setUp.
/**
 * Builds an FSDirectory backed by a mocked FSNamesystem whose
 * createFsOwnerPermissions simply wraps the requested permission with the
 * test SUPERUSER/SUPERGROUP identity.
 */
@Before
public void setUp() throws IOException {
  Configuration conf = new Configuration();
  FSNamesystem fsn = mock(FSNamesystem.class);
  // Stub out owner-permission creation so no real namesystem state is needed.
  doAnswer(new Answer() {
    @Override
    public Object answer(InvocationOnMock invocation) throws Throwable {
      FsPermission requested = (FsPermission) invocation.getArguments()[0];
      return new PermissionStatus(SUPERUSER, SUPERGROUP, requested);
    }
  }).when(fsn).createFsOwnerPermissions(any(FsPermission.class));
  dir = new FSDirectory(fsn, conf);
  inodeRoot = dir.getRoot();
}
Aggregations