Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.
Class TestQuota, method testNamespaceCommands.
/** Test commands that change the size of the name space:
* mkdirs, rename, and delete */
@Test
public void testNamespaceCommands() throws Exception {
  final Path parent = new Path(PathUtils.getTestPath(getClass()),
      GenericTestUtils.getMethodName());
  assertTrue(dfs.mkdirs(parent));
  // 1: create directory nqdir0/qdir1/qdir20/nqdir30
  assertTrue(dfs.mkdirs(new Path(parent, "nqdir0/qdir1/qdir20/nqdir30")));
  // 2: set the quota of nqdir0/qdir1 to be 6
  final Path quotaDir1 = new Path(parent, "nqdir0/qdir1");
  dfs.setQuota(quotaDir1, 6, HdfsConstants.QUOTA_DONT_SET);
  ContentSummary c = dfs.getContentSummary(quotaDir1);
  compareQuotaUsage(c, dfs, quotaDir1);
  assertEquals(c.getDirectoryCount(), 3);
  assertEquals(c.getQuota(), 6);
  // 3: set the quota of nqdir0/qdir1/qdir20 to be 7
  final Path quotaDir2 = new Path(parent, "nqdir0/qdir1/qdir20");
  dfs.setQuota(quotaDir2, 7, HdfsConstants.QUOTA_DONT_SET);
  c = dfs.getContentSummary(quotaDir2);
  compareQuotaUsage(c, dfs, quotaDir2);
  assertEquals(c.getDirectoryCount(), 2);
  assertEquals(c.getQuota(), 7);
  // 4: Create directory nqdir0/qdir1/qdir21 and set its quota to 2
  final Path quotaDir3 = new Path(parent, "nqdir0/qdir1/qdir21");
  assertTrue(dfs.mkdirs(quotaDir3));
  dfs.setQuota(quotaDir3, 2, HdfsConstants.QUOTA_DONT_SET);
  c = dfs.getContentSummary(quotaDir3);
  compareQuotaUsage(c, dfs, quotaDir3);
  assertEquals(c.getDirectoryCount(), 1);
  assertEquals(c.getQuota(), 2);
  // 5: Create directory nqdir0/qdir1/qdir21/nqdir32
  Path tempPath = new Path(quotaDir3, "nqdir32");
  assertTrue(dfs.mkdirs(tempPath));
  c = dfs.getContentSummary(quotaDir3);
  compareQuotaUsage(c, dfs, quotaDir3);
  assertEquals(c.getDirectoryCount(), 2);
  assertEquals(c.getQuota(), 2);
  // 6: Create directory nqdir0/qdir1/qdir21/nqdir33 (should exceed the quota)
  tempPath = new Path(quotaDir3, "nqdir33");
  boolean hasException = false;
  try {
    assertFalse(dfs.mkdirs(tempPath));
  } catch (NSQuotaExceededException e) {
    hasException = true;
  }
  assertTrue(hasException);
  c = dfs.getContentSummary(quotaDir3);
  compareQuotaUsage(c, dfs, quotaDir3);
  assertEquals(c.getDirectoryCount(), 2);
  assertEquals(c.getQuota(), 2);
  // 7: Create directory nqdir0/qdir1/qdir20/nqdir31
  tempPath = new Path(quotaDir2, "nqdir31");
  assertTrue(dfs.mkdirs(tempPath));
  c = dfs.getContentSummary(quotaDir2);
  compareQuotaUsage(c, dfs, quotaDir2);
  assertEquals(c.getDirectoryCount(), 3);
  assertEquals(c.getQuota(), 7);
  c = dfs.getContentSummary(quotaDir1);
  compareQuotaUsage(c, dfs, quotaDir1);
  assertEquals(c.getDirectoryCount(), 6);
  assertEquals(c.getQuota(), 6);
  // 8: Create directory nqdir0/qdir1/qdir20/nqdir33 (should exceed the quota)
  tempPath = new Path(quotaDir2, "nqdir33");
  hasException = false;
  try {
    assertFalse(dfs.mkdirs(tempPath));
  } catch (NSQuotaExceededException e) {
    hasException = true;
  }
  assertTrue(hasException);
  // 9: Move nqdir0/qdir1/qdir21/nqdir32 to nqdir0/qdir1/qdir20/nqdir30
  tempPath = new Path(quotaDir2, "nqdir30");
  dfs.rename(new Path(quotaDir3, "nqdir32"), tempPath);
  c = dfs.getContentSummary(quotaDir2);
  compareQuotaUsage(c, dfs, quotaDir2);
  assertEquals(c.getDirectoryCount(), 4);
  assertEquals(c.getQuota(), 7);
  c = dfs.getContentSummary(quotaDir1);
  compareQuotaUsage(c, dfs, quotaDir1);
  assertEquals(c.getDirectoryCount(), 6);
  assertEquals(c.getQuota(), 6);
  // 10: Move nqdir0/qdir1/qdir20/nqdir30 to nqdir0/qdir1/qdir21 (should fail)
  hasException = false;
  try {
    assertFalse(dfs.rename(tempPath, quotaDir3));
  } catch (NSQuotaExceededException e) {
    hasException = true;
  }
  assertTrue(hasException);
  assertTrue(dfs.exists(tempPath));
  assertFalse(dfs.exists(new Path(quotaDir3, "nqdir30")));
  // 10.a: Rename nqdir0/qdir1/qdir20/nqdir30 to nqdir0/qdir1/qdir21/nqdir32 (should fail)
  hasException = false;
  try {
    assertFalse(dfs.rename(tempPath, new Path(quotaDir3, "nqdir32")));
  } catch (QuotaExceededException e) {
    hasException = true;
  }
  assertTrue(hasException);
  assertTrue(dfs.exists(tempPath));
  assertFalse(dfs.exists(new Path(quotaDir3, "nqdir32")));
  // 11: Move nqdir0/qdir1/qdir20/nqdir30 to nqdir0
  assertTrue(dfs.rename(tempPath, new Path(parent, "nqdir0")));
  c = dfs.getContentSummary(quotaDir2);
  compareQuotaUsage(c, dfs, quotaDir2);
  assertEquals(c.getDirectoryCount(), 2);
  assertEquals(c.getQuota(), 7);
  c = dfs.getContentSummary(quotaDir1);
  compareQuotaUsage(c, dfs, quotaDir1);
  assertEquals(c.getDirectoryCount(), 4);
  assertEquals(c.getQuota(), 6);
  // 12: Create directory nqdir0/nqdir30/nqdir33
  assertTrue(dfs.mkdirs(new Path(parent, "nqdir0/nqdir30/nqdir33")));
  // 13: Move nqdir0/nqdir30 to nqdir0/qdir1/qdir20/nqdir30 (should fail)
  hasException = false;
  try {
    assertFalse(dfs.rename(new Path(parent, "nqdir0/nqdir30"), tempPath));
  } catch (NSQuotaExceededException e) {
    hasException = true;
  }
  assertTrue(hasException);
  // 14: Move nqdir0/qdir1/qdir21 to nqdir0/qdir1/qdir20
  assertTrue(dfs.rename(quotaDir3, quotaDir2));
  c = dfs.getContentSummary(quotaDir1);
  compareQuotaUsage(c, dfs, quotaDir1);
  assertEquals(c.getDirectoryCount(), 4);
  assertEquals(c.getQuota(), 6);
  c = dfs.getContentSummary(quotaDir2);
  compareQuotaUsage(c, dfs, quotaDir2);
  assertEquals(c.getDirectoryCount(), 3);
  assertEquals(c.getQuota(), 7);
  tempPath = new Path(quotaDir2, "qdir21");
  c = dfs.getContentSummary(tempPath);
  compareQuotaUsage(c, dfs, tempPath);
  assertEquals(c.getDirectoryCount(), 1);
  assertEquals(c.getQuota(), 2);
  // 15: Delete nqdir0/qdir1/qdir20/qdir21
  dfs.delete(tempPath, true);
  c = dfs.getContentSummary(quotaDir2);
  compareQuotaUsage(c, dfs, quotaDir2);
  assertEquals(c.getDirectoryCount(), 2);
  assertEquals(c.getQuota(), 7);
  c = dfs.getContentSummary(quotaDir1);
  compareQuotaUsage(c, dfs, quotaDir1);
  assertEquals(c.getDirectoryCount(), 3);
  assertEquals(c.getQuota(), 6);
  // 16: Move nqdir0/nqdir30 to nqdir0/qdir1/qdir20
  assertTrue(dfs.rename(new Path(parent, "nqdir0/nqdir30"), quotaDir2));
  c = dfs.getContentSummary(quotaDir2);
  compareQuotaUsage(c, dfs, quotaDir2);
  assertEquals(c.getDirectoryCount(), 5);
  assertEquals(c.getQuota(), 7);
  c = dfs.getContentSummary(quotaDir1);
  compareQuotaUsage(c, dfs, quotaDir1);
  assertEquals(c.getDirectoryCount(), 6);
  assertEquals(c.getQuota(), 6);
}
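The sequence above boils down to one round trip: DistributedFileSystem.setQuota constrains the number of namespace items in a subtree (files plus directories, where the directory itself counts as one), and getContentSummary reports both the current usage and the configured limit. A minimal sketch of that round trip, assuming an already-initialized DistributedFileSystem named dfs; the path /quota-demo is hypothetical:

// Minimal sketch: set a namespace quota, then read it back through ContentSummary.
Path dir = new Path("/quota-demo");
assertTrue(dfs.mkdirs(dir));
// Allow at most 5 namespace items under `dir` (the directory itself counts as 1);
// QUOTA_DONT_SET leaves the space quota unchanged.
dfs.setQuota(dir, 5, HdfsConstants.QUOTA_DONT_SET);
ContentSummary cs = dfs.getContentSummary(dir);
long used = cs.getFileCount() + cs.getDirectoryCount();
System.out.println("namespace used=" + used + " of quota=" + cs.getQuota());

A mkdirs, create, or rename that would push the count past the quota fails with NSQuotaExceededException, which is exactly what steps 6, 8, 10, and 13 of the test assert.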
Use of org.apache.hadoop.fs.ContentSummary in project ignite by apache.
Class IgniteHadoopFileSystem, method getContentSummary.
/**
 * {@inheritDoc}
 */
@Override
public ContentSummary getContentSummary(Path f) throws IOException {
  A.notNull(f, "f");
  enterBusy();
  try {
    IgfsPathSummary sum = rmtClient.contentSummary(convert(f));
    return new ContentSummary(sum.totalLength(), sum.filesCount(),
        sum.directoriesCount(), -1, sum.totalLength(),
        rmtClient.fsStatus().spaceTotal());
  } finally {
    leaveBusy();
  }
}
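One note on this snippet: the positional six-argument ContentSummary constructor is deprecated in later Hadoop releases in favor of ContentSummary.Builder. A sketch of the equivalent construction, reusing the sum and rmtClient values from the method above:

// Sketch: the same summary built with ContentSummary.Builder
// instead of the deprecated positional constructor.
return new ContentSummary.Builder()
    .length(sum.totalLength())
    .fileCount(sum.filesCount())
    .directoryCount(sum.directoriesCount())
    .quota(-1)                                  // IGFS reports no namespace quota
    .spaceConsumed(sum.totalLength())
    .spaceQuota(rmtClient.fsStatus().spaceTotal())
    .build();

The builder makes the -1 namespace-quota placeholder explicit instead of burying it in a positional argument list.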
Use of org.apache.hadoop.fs.ContentSummary in project hive by apache.
Class TestReplicationScenarios, method testRecycleFileDropTempTable.
@Test
public void testRecycleFileDropTempTable() throws IOException {
  String dbName = createDB(testName.getMethodName(), driver);
  run("CREATE TABLE " + dbName + ".normal(a int)", driver);
  run("INSERT INTO " + dbName + ".normal values (1)", driver);
  run("DROP TABLE " + dbName + ".normal", driver);

  String cmDir = hconf.getVar(HiveConf.ConfVars.REPLCMDIR);
  Path path = new Path(cmDir);
  FileSystem fs = path.getFileSystem(hconf);
  ContentSummary cs = fs.getContentSummary(path);
  long fileCount = cs.getFileCount();
  assertTrue(fileCount != 0);

  run("CREATE TABLE " + dbName + ".normal(a int)", driver);
  run("INSERT INTO " + dbName + ".normal values (1)", driver);
  run("CREATE TEMPORARY TABLE " + dbName + ".temp(a int)", driver);
  run("INSERT INTO " + dbName + ".temp values (2)", driver);
  run("INSERT OVERWRITE TABLE " + dbName + ".temp select * from " + dbName + ".normal", driver);
  cs = fs.getContentSummary(path);
  long fileCountAfter = cs.getFileCount();
  assertTrue(fileCount == fileCountAfter);

  run("INSERT INTO " + dbName + ".temp values (3)", driver);
  run("TRUNCATE TABLE " + dbName + ".temp", driver);
  cs = fs.getContentSummary(path);
  fileCountAfter = cs.getFileCount();
  assertTrue(fileCount == fileCountAfter);

  run("INSERT INTO " + dbName + ".temp values (4)", driver);
  run("ALTER TABLE " + dbName + ".temp RENAME to " + dbName + ".temp1", driver);
  verifyRun("SELECT count(*) from " + dbName + ".temp1", new String[] { "1" }, driver);
  cs = fs.getContentSummary(path);
  fileCountAfter = cs.getFileCount();
  assertTrue(fileCount == fileCountAfter);

  run("INSERT INTO " + dbName + ".temp1 values (5)", driver);
  run("DROP TABLE " + dbName + ".temp1", driver);
  cs = fs.getContentSummary(path);
  fileCountAfter = cs.getFileCount();
  assertTrue(fileCount == fileCountAfter);
}
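The invariant being checked is the same throughout: take a file-count snapshot of the change-management (CM) directory, run operations that should bypass it, and verify the count is unchanged. Factored into a sketch, with fs and path as in the test above:

// Sketch of the invariant used above: operations on temporary tables must not
// add files to the CM (change-management) directory used for replication.
long before = fs.getContentSummary(path).getFileCount();
// ... run DDL/DML against temporary tables here ...
long after = fs.getContentSummary(path).getFileCount();
assertEquals("temp-table operations must not write to the CM dir", before, after);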
Use of org.apache.hadoop.fs.ContentSummary in project hive by apache.
Class TestGetInputSummary, method testGetInputSummaryWithMultipleThreads.
@Test
@SuppressWarnings("deprecation")
public void testGetInputSummaryWithMultipleThreads() throws IOException {
  final int BYTES_PER_FILE = 5;
  final Collection<Path> testPaths = Arrays.asList(
      new Path("p1/test.txt"), new Path("p2/test.txt"), new Path("p3/test.txt"),
      new Path("p4/test.txt"), new Path("p5/test.txt"));

  jobConf.setInt(HiveConf.ConfVars.HIVE_EXEC_INPUT_LISTING_MAX_THREADS.varname, 2);
  ContentSummary summary = runTestGetInputSummary(jobConf, properties, testPaths,
      BYTES_PER_FILE, HiveInputFormat.class, Collections.emptyMap());
  assertEquals(testPaths.size() * BYTES_PER_FILE, summary.getLength());
  assertEquals(testPaths.size(), summary.getFileCount());
  assertEquals(testPaths.size(), summary.getDirectoryCount());

  // Test deprecated mapred.dfsclient.parallelism.max
  jobConf.setInt(HiveConf.ConfVars.HIVE_EXEC_INPUT_LISTING_MAX_THREADS.varname, 0);
  jobConf.setInt(Utilities.DEPRECATED_MAPRED_DFSCLIENT_PARALLELISM_MAX, 2);
  summary = runTestGetInputSummary(jobConf, properties, testPaths, BYTES_PER_FILE,
      HiveInputFormat.class, Collections.emptyMap());
  assertEquals(testPaths.size() * BYTES_PER_FILE, summary.getLength());
  assertEquals(testPaths.size(), summary.getFileCount());
  assertEquals(testPaths.size(), summary.getDirectoryCount());
}
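Under the hood, an input summary is a component-wise fold of per-path content summaries, which is why the totals above scale linearly with testPaths.size(). A sketch of that aggregation, where fs and inputPaths are assumed to be an initialized FileSystem and a list of input directories:

// Sketch: component-wise aggregation of per-path content summaries,
// which is what an input-summary pass amounts to (minus the thread pool).
long length = 0, files = 0, dirs = 0;
for (Path p : inputPaths) {
  ContentSummary cs = fs.getContentSummary(p);
  length += cs.getLength();
  files += cs.getFileCount();
  dirs += cs.getDirectoryCount();
}
ContentSummary total = new ContentSummary.Builder()
    .length(length).fileCount(files).directoryCount(dirs).build();

The HIVE_EXEC_INPUT_LISTING_MAX_THREADS setting (or the deprecated mapred.dfsclient.parallelism.max) only changes how many of those getContentSummary calls run concurrently, not the arithmetic.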
Use of org.apache.hadoop.fs.ContentSummary in project hive by apache.
Class TestGetInputSummary, method testGetInputSummaryWithInputEstimator.
@Test
public void testGetInputSummaryWithInputEstimator() throws IOException, HiveException {
  final int BYTES_PER_FILE = 10;
  final int NUM_OF_ROWS = 5;
  final Collection<Path> testPaths = Arrays.asList(
      new Path("p1/test.txt"), new Path("p2/test.txt"), new Path("p3/test.txt"),
      new Path("p4/test.txt"), new Path("p5/test.txt"));

  jobConf.setInt(HiveConf.ConfVars.HIVE_EXEC_INPUT_LISTING_MAX_THREADS.varname, 2);
  properties.setProperty(hive_metastoreConstants.META_TABLE_STORAGE,
      InputEstimatorTestClass.class.getName());
  InputEstimatorTestClass.setEstimation(new InputEstimator.Estimation(NUM_OF_ROWS, BYTES_PER_FILE));

  /*
   * Write twice as many bytes to the files as the estimator reports, to verify
   * that the estimator's size is used rather than the size on the filesystem.
   */
  ContentSummary summary = runTestGetInputSummary(jobConf, properties, testPaths,
      BYTES_PER_FILE * 2, HiveInputFormat.class, Collections.emptyMap());
  assertEquals(testPaths.size() * BYTES_PER_FILE, summary.getLength());
  // The current getInputSummary() returns -1 per file found, for both counts
  assertEquals(testPaths.size() * -1, summary.getFileCount());
  assertEquals(testPaths.size() * -1, summary.getDirectoryCount());

  // Test deprecated mapred.dfsclient.parallelism.max
  jobConf.setInt(HiveConf.ConfVars.HIVE_EXEC_INPUT_LISTING_MAX_THREADS.varname, 0);
  jobConf.setInt(Utilities.DEPRECATED_MAPRED_DFSCLIENT_PARALLELISM_MAX, 2);
  properties.setProperty(hive_metastoreConstants.META_TABLE_STORAGE,
      InputEstimatorTestClass.class.getName());
  InputEstimatorTestClass.setEstimation(new InputEstimator.Estimation(NUM_OF_ROWS, BYTES_PER_FILE));

  // As before, write twice the estimated bytes to confirm the estimator's size wins
  summary = runTestGetInputSummary(jobConf, properties, testPaths, BYTES_PER_FILE * 2,
      HiveInputFormat.class, Collections.emptyMap());
  assertEquals(testPaths.size() * BYTES_PER_FILE, summary.getLength());
  // The current getInputSummary() returns -1 per file found, for both counts
  assertEquals(testPaths.size() * -1, summary.getFileCount());
  assertEquals(testPaths.size() * -1, summary.getDirectoryCount());
}
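InputEstimatorTestClass stands in for a storage handler whose estimator Hive consults instead of listing the filesystem. As a rough sketch of the shape of such an estimator (the class name is hypothetical, and the estimate signature is assumed to match Hive's org.apache.hadoop.hive.ql.metadata.InputEstimator):

// Hypothetical fixed-size estimator: always reports 5 rows and 10 bytes,
// regardless of what is actually on disk. Hive consults it when
// META_TABLE_STORAGE points at a storage handler implementing InputEstimator.
public class FixedSizeEstimator implements InputEstimator {
  @Override
  public Estimation estimate(JobConf job, TableScanOperator ts, long remaining)
      throws HiveException {
    return new Estimation(5, 10); // (rowCount, totalLength)
  }
}

That substitution is why the test writes BYTES_PER_FILE * 2 bytes to disk yet asserts a summary length of BYTES_PER_FILE per path.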