Example 86 with Path

use of org.apache.hadoop.fs.Path in project hive by apache.

the class FSStatsPublisher method connect.

@Override
public boolean connect(StatsCollectionContext context) {
    conf = context.getHiveConf();
    List<String> statsDirs = context.getStatsTmpDirs();
    assert statsDirs.size() == 1 : "Found multiple stats dirs: " + statsDirs;
    Path statsDir = new Path(statsDirs.get(0));
    LOG.debug("Connecting to : " + statsDir);
    statsMap = new HashMap<String, Map<String, String>>();
    try {
        return statsDir.getFileSystem(conf).exists(statsDir);
    } catch (IOException e) {
        LOG.error("Failed to check if dir exists", e);
        return false;
    }
}
Also used : Path(org.apache.hadoop.fs.Path) IOException(java.io.IOException) HashMap(java.util.HashMap) Map(java.util.Map)
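
The key idiom above is resolving the FileSystem from the Path itself via statsDir.getFileSystem(conf), so the same publisher works whether the stats directory lives on HDFS, the local filesystem, or an object store. A minimal standalone sketch of that resolution step, with a hypothetical directory URI:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PathExistsCheck {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Hypothetical directory; the URI scheme selects the FileSystem implementation
        Path statsDir = new Path("file:///tmp/hive-stats");
        FileSystem fs = statsDir.getFileSystem(conf);
        System.out.println(statsDir + " exists: " + fs.exists(statsDir));
    }
}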

Example 87 with Path

use of org.apache.hadoop.fs.Path in project hbase by apache.

the class TestTableSnapshotScanner method testScanner.

private void testScanner(HBaseTestingUtility util, String snapshotName, int numRegions, boolean shutdownCluster) throws Exception {
    setupCluster();
    TableName tableName = TableName.valueOf("testScanner");
    try {
        createTableAndSnapshot(util, tableName, snapshotName, numRegions);
        if (shutdownCluster) {
            util.shutdownMiniHBaseCluster();
        }
        Path restoreDir = util.getDataTestDirOnTestFS(snapshotName);
        // limit the scan; bbb and yyy are byte[] row keys defined on the test class
        Scan scan = new Scan(bbb, yyy);
        // UTIL is the test class's static HBaseTestingUtility instance
        TableSnapshotScanner scanner = new TableSnapshotScanner(UTIL.getConfiguration(), restoreDir, snapshotName, scan);
        verifyScanner(scanner, bbb, yyy);
        scanner.close();
    } finally {
        if (!shutdownCluster) {
            util.getAdmin().deleteSnapshot(snapshotName);
            util.deleteTable(tableName);
            tearDownCluster();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) TableName(org.apache.hadoop.hbase.TableName)
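
A minimal sketch of the same client-side snapshot scan with the row keys spelled out; the snapshot name and restore directory are hypothetical, and a running cluster holding that snapshot is assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.TableSnapshotScanner;
import org.apache.hadoop.hbase.util.Bytes;

public class SnapshotScanSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Path restoreDir = new Path("/tmp/snapshot-restore"); // hypothetical scratch dir
        Scan scan = new Scan(Bytes.toBytes("bbb"), Bytes.toBytes("yyy")); // [start, stop) row range
        TableSnapshotScanner scanner =
            new TableSnapshotScanner(conf, restoreDir, "mySnapshot", scan);
        for (Result r; (r = scanner.next()) != null; ) {
            System.out.println(Bytes.toString(r.getRow()));
        }
        scanner.close();
    }
}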

Example 88 with Path

use of org.apache.hadoop.fs.Path in project hbase by apache.

the class TableSnapshotInputFormatTestBase method createTableAndSnapshot.

protected static void createTableAndSnapshot(HBaseTestingUtility util, TableName tableName, String snapshotName, byte[] startRow, byte[] endRow, int numRegions) throws Exception {
    try {
        util.deleteTable(tableName);
    } catch (Exception ex) {
    // ignore
    }
    if (numRegions > 1) {
        util.createTable(tableName, FAMILIES, 1, startRow, endRow, numRegions);
    } else {
        util.createTable(tableName, FAMILIES);
    }
    Admin admin = util.getAdmin();
    // put some stuff in the table
    Table table = util.getConnection().getTable(tableName);
    util.loadTable(table, FAMILIES);
    Path rootDir = FSUtils.getRootDir(util.getConfiguration());
    FileSystem fs = rootDir.getFileSystem(util.getConfiguration());
    SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, Arrays.asList(FAMILIES), null, snapshotName, rootDir, fs, true);
    // load different values
    byte[] value = Bytes.toBytes("after_snapshot_value");
    util.loadTable(table, FAMILIES, value);
    // cause flush to create new files in the region
    admin.flush(tableName);
    table.close();
}
Also used : Path(org.apache.hadoop.fs.Path) Table(org.apache.hadoop.hbase.client.Table) FileSystem(org.apache.hadoop.fs.FileSystem) Admin(org.apache.hadoop.hbase.client.Admin) IOException(java.io.IOException)
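
Outside the test harness, the snapshot-then-flush flow in this helper maps onto plain Admin calls. A minimal sketch assuming a reachable cluster and an existing table; the table and snapshot names are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("myTable"); // hypothetical table
            admin.snapshot("mySnapshot", table); // point-in-time snapshot of current HFiles
            admin.flush(table); // force in-memory edits out as new HFiles
        }
    }
}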

Example 89 with Path

use of org.apache.hadoop.fs.Path in project hbase by apache.

the class TestTableSnapshotInputFormat method testWithMockedMapReduce.

@Override
protected void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName, int numRegions, int expectedNumSplits) throws Exception {
    setupCluster();
    final TableName tableName = TableName.valueOf(name.getMethodName());
    try {
        createTableAndSnapshot(util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions);
        JobConf job = new JobConf(util.getConfiguration());
        Path tmpTableDir = util.getDataTestDirOnTestFS(snapshotName);
        TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir);
        // mapred doesn't support start and end keys? o.O
        verifyWithMockedMapReduce(job, numRegions, expectedNumSplits, getStartRow(), getEndRow());
    } finally {
        util.getAdmin().deleteSnapshot(snapshotName);
        util.deleteTable(tableName);
        tearDownCluster();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) TableName(org.apache.hadoop.hbase.TableName) JobConf(org.apache.hadoop.mapred.JobConf)
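
The TestTableSnapshotMapper handed to initTableSnapshotMapJob above is a mapred-API TableMap implementation. A minimal sketch of such a mapper, with a hypothetical class name, that emits each row key and could be substituted into the same job setup:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableMap;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class RowKeyMapper extends MapReduceBase
        implements TableMap<ImmutableBytesWritable, NullWritable> {
    @Override
    public void map(ImmutableBytesWritable key, Result value,
            OutputCollector<ImmutableBytesWritable, NullWritable> collector,
            Reporter reporter) throws IOException {
        collector.collect(key, NullWritable.get()); // emit the row key only
    }
}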

Example 90 with Path

use of org.apache.hadoop.fs.Path in project hbase by apache.

the class TestCatalogJanitor method createReferences.

/**
   * @param services Master services instance.
   * @param htd Descriptor of the table the regions belong to.
   * @param parent Parent region being split.
   * @param daughter Daughter region the reference is written under.
   * @param midkey Split row the reference points at.
   * @param top True if we are to write a 'top' reference.
   * @return Path to reference we created.
   * @throws IOException
   */
private Path createReferences(final MasterServices services, final HTableDescriptor htd, final HRegionInfo parent, final HRegionInfo daughter, final byte[] midkey, final boolean top) throws IOException {
    Path rootdir = services.getMasterFileSystem().getRootDir();
    Path tabledir = FSUtils.getTableDir(rootdir, parent.getTable());
    Path storedir = HStore.getStoreHomedir(tabledir, daughter, htd.getColumnFamilies()[0].getName());
    Reference ref = top ? Reference.createTopReference(midkey) : Reference.createBottomReference(midkey);
    long now = System.currentTimeMillis();
    // Reference name has this format: StoreFile#REF_NAME_PARSER
    Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName());
    FileSystem fs = services.getMasterFileSystem().getFileSystem();
    ref.write(fs, p);
    return p;
}
Also used : Path(org.apache.hadoop.fs.Path) Reference(org.apache.hadoop.hbase.io.Reference) FileSystem(org.apache.hadoop.fs.FileSystem)
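
The reference file written here follows the StoreFile naming convention <hfileName>.<parentEncodedName>. A minimal round-trip sketch, version permitting, that writes a top reference and reads it back from the local filesystem; the path and midkey are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.Reference;
import org.apache.hadoop.hbase.util.Bytes;

public class ReferenceRoundTrip {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);
        // Hypothetical reference path in the <hfileName>.<parentEncodedName> format
        Path p = new Path("/tmp/store/1234567890.abc123def456");
        Reference ref = Reference.createTopReference(Bytes.toBytes("midrow"));
        ref.write(fs, p); // same write call the test above uses
        Reference readBack = Reference.read(fs, p); // parse it back off disk
        System.out.println(readBack);
    }
}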

Aggregations

Path (org.apache.hadoop.fs.Path): 11752
Test (org.junit.Test): 4193
FileSystem (org.apache.hadoop.fs.FileSystem): 3587
IOException (java.io.IOException): 2631
Configuration (org.apache.hadoop.conf.Configuration): 2621
FileStatus (org.apache.hadoop.fs.FileStatus): 1568
ArrayList (java.util.ArrayList): 1145
File (java.io.File): 987
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 924
HashMap (java.util.HashMap): 570
Job (org.apache.hadoop.mapreduce.Job): 492
JobConf (org.apache.hadoop.mapred.JobConf): 477
URI (java.net.URI): 465
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 462
FileNotFoundException (java.io.FileNotFoundException): 441
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 375
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 362
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 355
Map (java.util.Map): 326
List (java.util.List): 316