Use of org.apache.hadoop.fs.Path in project hive by apache.
From the class FSStatsPublisher, method connect:
@Override
public boolean connect(StatsCollectionContext context) {
  conf = context.getHiveConf();
  List<String> statsDirs = context.getStatsTmpDirs();
  assert statsDirs.size() == 1 : "Found multiple stats dirs: " + statsDirs;
  Path statsDir = new Path(statsDirs.get(0));
  LOG.debug("Connecting to : " + statsDir);
  statsMap = new HashMap<String, Map<String, String>>();
  try {
    return statsDir.getFileSystem(conf).exists(statsDir);
  } catch (IOException e) {
    LOG.error("Failed to check if dir exists", e);
    return false;
  }
}
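The connect method boils down to resolving the FileSystem that backs the stats directory and checking that the directory exists. A minimal standalone sketch of that same Path idiom, assuming a default Hadoop configuration and a hypothetical /tmp/hive-stats directory (in the real publisher the directory comes from StatsCollectionContext):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class StatsDirCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // hypothetical stats tmp dir; FSStatsPublisher receives this via StatsCollectionContext
    Path statsDir = new Path("/tmp/hive-stats");
    // the FileSystem is resolved from the path's scheme (file://, hdfs://, ...)
    FileSystem fs = statsDir.getFileSystem(conf);
    System.out.println("stats dir exists: " + fs.exists(statsDir));
  }
}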
Use of org.apache.hadoop.fs.Path in project hbase by apache.
From the class TestTableSnapshotScanner, method testScanner:
private void testScanner(HBaseTestingUtility util, String snapshotName, int numRegions,
    boolean shutdownCluster) throws Exception {
  setupCluster();
  TableName tableName = TableName.valueOf("testScanner");
  try {
    createTableAndSnapshot(util, tableName, snapshotName, numRegions);
    if (shutdownCluster) {
      util.shutdownMiniHBaseCluster();
    }
    Path restoreDir = util.getDataTestDirOnTestFS(snapshotName);
    // limit the scan
    Scan scan = new Scan(bbb, yyy);
    TableSnapshotScanner scanner =
        new TableSnapshotScanner(UTIL.getConfiguration(), restoreDir, snapshotName, scan);
    verifyScanner(scanner, bbb, yyy);
    scanner.close();
  } finally {
    if (!shutdownCluster) {
      util.getAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
      tearDownCluster();
    }
  }
}
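The point of the test is that TableSnapshotScanner reads snapshot files directly from the filesystem, restored into a scratch Path, without going through region servers (which is why the cluster can even be shut down first). A hedged usage sketch, assuming a snapshot named my_snapshot already exists and /tmp/restore is a writable scratch directory:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableSnapshotScanner;
import org.apache.hadoop.hbase.util.Bytes;

public class SnapshotScanExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path restoreDir = new Path("/tmp/restore"); // scratch dir the snapshot is restored into
    // limit the scan to a row range, as the test does with its bbb..yyy keys
    Scan scan = new Scan(Bytes.toBytes("bbb"), Bytes.toBytes("yyy"));
    TableSnapshotScanner scanner = new TableSnapshotScanner(conf, restoreDir, "my_snapshot", scan);
    try {
      for (Result r = scanner.next(); r != null; r = scanner.next()) {
        System.out.println(Bytes.toString(r.getRow()));
      }
    } finally {
      scanner.close();
    }
  }
}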
Use of org.apache.hadoop.fs.Path in project hbase by apache.
From the class TableSnapshotInputFormatTestBase, method createTableAndSnapshot:
protected static void createTableAndSnapshot(HBaseTestingUtility util, TableName tableName,
    String snapshotName, byte[] startRow, byte[] endRow, int numRegions) throws Exception {
  try {
    util.deleteTable(tableName);
  } catch (Exception ex) {
    // ignore
  }
  if (numRegions > 1) {
    util.createTable(tableName, FAMILIES, 1, startRow, endRow, numRegions);
  } else {
    util.createTable(tableName, FAMILIES);
  }
  Admin admin = util.getAdmin();
  // put some stuff in the table
  Table table = util.getConnection().getTable(tableName);
  util.loadTable(table, FAMILIES);
  Path rootDir = FSUtils.getRootDir(util.getConfiguration());
  FileSystem fs = rootDir.getFileSystem(util.getConfiguration());
  SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, Arrays.asList(FAMILIES),
      null, snapshotName, rootDir, fs, true);
  // load different values
  byte[] value = Bytes.toBytes("after_snapshot_value");
  util.loadTable(table, FAMILIES, value);
  // cause flush to create new files in the region
  admin.flush(tableName);
  table.close();
}
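The helper writes one batch of data, snapshots the table, then writes a second batch and flushes, so later assertions can distinguish snapshot data from post-snapshot data. A simplified sketch of that take-snapshot-then-mutate pattern with the plain Admin API (table, family, and snapshot names here are illustrative, not from the test):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SnapshotThenMutate {
  public static void run(Connection conn) throws Exception {
    TableName tableName = TableName.valueOf("demo");
    try (Admin admin = conn.getAdmin()) {
      // freeze the current contents of the table
      admin.snapshot("demo_snapshot", tableName);
      // writes after this point are not visible through the snapshot
      try (Table table = conn.getTable(tableName)) {
        Put put = new Put(Bytes.toBytes("row1"));
        put.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("q"), Bytes.toBytes("after_snapshot_value"));
        table.put(put);
      }
      // force new store files so the post-snapshot data lands outside the snapshot
      admin.flush(tableName);
    }
  }
}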
Use of org.apache.hadoop.fs.Path in project hbase by apache.
From the class TestTableSnapshotInputFormat, method testWithMockedMapReduce:
@Override
protected void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName,
    int numRegions, int expectedNumSplits) throws Exception {
  setupCluster();
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try {
    createTableAndSnapshot(util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions);
    JobConf job = new JobConf(util.getConfiguration());
    Path tmpTableDir = util.getDataTestDirOnTestFS(snapshotName);
    TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, TestTableSnapshotMapper.class,
        ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir);
    // mapred doesn't support start and end keys? o.O
    verifyWithMockedMapReduce(job, numRegions, expectedNumSplits, getStartRow(), getEndRow());
  } finally {
    util.getAdmin().deleteSnapshot(snapshotName);
    util.deleteTable(tableName);
    tearDownCluster();
  }
}
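initTableSnapshotMapJob wires the old mapred JobConf to read from the snapshot through a temporary restore Path instead of a live table. For reference, a hedged sketch of the equivalent setup with the newer org.apache.hadoop.hbase.mapreduce API (the snapshot name, restore path, and do-nothing mapper are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;

public class SnapshotMapJobSetup {
  // Hypothetical mapper; the job setup is the point here, so map() is left at its default.
  public static class RowMapper extends TableMapper<ImmutableBytesWritable, NullWritable> {
  }

  public static Job configure() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "scan-snapshot");
    Path restoreDir = new Path("/tmp/snapshot-restore"); // scratch dir on the cluster filesystem
    TableMapReduceUtil.initTableSnapshotMapperJob("my_snapshot", new Scan(), RowMapper.class,
        ImmutableBytesWritable.class, NullWritable.class, job,
        false /* addDependencyJars */, restoreDir);
    return job;
  }
}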
Use of org.apache.hadoop.fs.Path in project hbase by apache.
From the class TestCatalogJanitor, method createReferences:
/**
 * @param services Master services instance.
 * @param htd Descriptor of the table the regions belong to.
 * @param parent Parent region whose store file the reference points back to.
 * @param daughter Daughter region in whose store directory the reference is written.
 * @param midkey Split key used to build the reference.
 * @param top True if we are to write a 'top' reference.
 * @return Path to reference we created.
 * @throws IOException
 */
private Path createReferences(final MasterServices services, final HTableDescriptor htd,
    final HRegionInfo parent, final HRegionInfo daughter, final byte[] midkey, final boolean top)
    throws IOException {
  Path rootdir = services.getMasterFileSystem().getRootDir();
  Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
  Path storedir = HStore.getStoreHomedir(tabledir, daughter, htd.getColumnFamilies()[0].getName());
  Reference ref =
      top ? Reference.createTopReference(midkey) : Reference.createBottomReference(midkey);
  long now = System.currentTimeMillis();
  // Reference name has this format: StoreFile#REF_NAME_PARSER
  Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
  FileSystem fs = services.getMasterFileSystem().getFileSystem();
  ref.write(fs, p);
  return p;
}
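Most of the method is Path arithmetic: root dir, table dir, store dir, and finally a reference file named <timestamp>.<encoded parent region name>. A small sketch isolating just that name composition (the store directory and encoded name below are made up for illustration):

import org.apache.hadoop.fs.Path;

public class ReferencePathSketch {
  static Path referencePath(Path storeDir, String parentEncodedName) {
    // reference file name format: <timestamp>.<encoded name of the parent region>
    long now = System.currentTimeMillis();
    return new Path(storeDir, Long.toString(now) + "." + parentEncodedName);
  }

  public static void main(String[] args) {
    Path storeDir = new Path("/hbase/data/default/t1/daughter-region/f1"); // illustrative store dir
    System.out.println(referencePath(storeDir, "0123abcd"));
  }
}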