Example 16 with PathFilter

use of org.apache.hadoop.fs.PathFilter in project hbase by apache.

the class TestDistributedLogSplitting method testReadWriteSeqIdFiles.

@Test(timeout = 300000)
public void testReadWriteSeqIdFiles() throws Exception {
    LOG.info("testReadWriteSeqIdFiles");
    startCluster(2);
    final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
    Table ht = installTable(zkw, name.getMethodName(), "family", 10);
    try {
        FileSystem fs = master.getMasterFileSystem().getFileSystem();
        Path tableDir = FSUtils.getTableDir(FSUtils.getRootDir(conf), TableName.valueOf(name.getMethodName()));
        List<Path> regionDirs = FSUtils.getRegionDirs(fs, tableDir);
        long newSeqId = WALSplitter.writeRegionSequenceIdFile(fs, regionDirs.get(0), 1L, 1000L);
        WALSplitter.writeRegionSequenceIdFile(fs, regionDirs.get(0), 1L, 1000L);
        assertEquals(newSeqId + 2000, WALSplitter.writeRegionSequenceIdFile(fs, regionDirs.get(0), 3L, 1000L));
        Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(regionDirs.get(0));
        FileStatus[] files = FSUtils.listStatus(fs, editsdir, new PathFilter() {

            @Override
            public boolean accept(Path p) {
                return WALSplitter.isSequenceIdFile(p);
            }
        });
        // only one seqid file should exist
        assertEquals(1, files.length);
        // verify all seqId files aren't treated as recovered.edits files
        NavigableSet<Path> recoveredEdits = WALSplitter.getSplitEditFilesSorted(fs, regionDirs.get(0));
        assertEquals(0, recoveredEdits.size());
    } finally {
        if (ht != null)
            ht.close();
        if (zkw != null)
            zkw.close();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), PathFilter (org.apache.hadoop.fs.PathFilter), Table (org.apache.hadoop.hbase.client.Table), FileStatus (org.apache.hadoop.fs.FileStatus), ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher), FileSystem (org.apache.hadoop.fs.FileSystem), Test (org.junit.Test)
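
Since PathFilter declares a single accept(Path) method, the anonymous class in this test can be replaced by a method reference on Java 8+. A minimal equivalent sketch, reusing the fs and editsdir variables from the test:

// Same filter as the anonymous class above: keep only sequence-id marker files.
FileStatus[] files = FSUtils.listStatus(fs, editsdir, WALSplitter::isSequenceIdFile);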

Example 17 with PathFilter

use of org.apache.hadoop.fs.PathFilter in project hbase by apache.

the class TestDeleteColumnFamilyProcedureFromClient method deleteColumnFamilyWithMultipleRegions.

@Test
public void deleteColumnFamilyWithMultipleRegions() throws Exception {
    Admin admin = TEST_UTIL.getAdmin();
    HTableDescriptor beforehtd = admin.getTableDescriptor(TABLENAME);
    FileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
    // 1 - Check that the table is available
    assertTrue(admin.isTableAvailable(TABLENAME));
    // 2 - Check if all three families exist in descriptor
    assertEquals(3, beforehtd.getColumnFamilyCount());
    HColumnDescriptor[] families = beforehtd.getColumnFamilies();
    for (int i = 0; i < families.length; i++) {
        assertEquals("cf" + (i + 1), families[i].getNameAsString());
    }
    // 3 - Check if table exists in FS
    Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), TABLENAME);
    assertTrue(fs.exists(tableDir));
    // 4 - Check if all the 3 column families exist in FS
    FileStatus[] fileStatus = fs.listStatus(tableDir);
    for (int i = 0; i < fileStatus.length; i++) {
        if (fileStatus[i].isDirectory()) {
            FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() {

                @Override
                public boolean accept(Path p) {
                    // Skip the recovered.edits directory; everything else here is a column family.
                    return !p.getName().contains(HConstants.RECOVERED_EDITS_DIR);
                }
            });
            int k = 1;
            for (int j = 0; j < cf.length; j++) {
                if (cf[j].isDirectory() && !cf[j].getPath().getName().startsWith(".")) {
                    // Expected value first: the families should appear in order as cf1, cf2, cf3.
                    assertEquals("cf" + k, cf[j].getPath().getName());
                    k++;
                }
            }
        }
    }
    // TEST - Disable and delete the column family
    admin.disableTable(TABLENAME);
    admin.deleteColumnFamily(TABLENAME, Bytes.toBytes("cf2"));
    // 5 - Check if only 2 column families exist in the descriptor
    HTableDescriptor afterhtd = admin.getTableDescriptor(TABLENAME);
    assertEquals(2, afterhtd.getColumnFamilyCount());
    HColumnDescriptor[] newFamilies = afterhtd.getColumnFamilies();
    assertEquals("cf1", newFamilies[0].getNameAsString());
    assertEquals("cf3", newFamilies[1].getNameAsString());
    // 6 - Check if the second column family is gone from the FS
    fileStatus = fs.listStatus(tableDir);
    for (int i = 0; i < fileStatus.length; i++) {
        if (fileStatus[i].isDirectory()) {
            FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), new PathFilter() {

                @Override
                public boolean accept(Path p) {
                    // Ignore the sequence-id marker files left behind by log splitting.
                    return !WALSplitter.isSequenceIdFile(p);
                }
            });
            for (int j = 0; j < cf.length; j++) {
                if (cf[j].isDirectory()) {
                    assertFalse(cf[j].getPath().getName().equals("cf2"));
                }
            }
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), PathFilter (org.apache.hadoop.fs.PathFilter), FileStatus (org.apache.hadoop.fs.FileStatus), HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor), FileSystem (org.apache.hadoop.fs.FileSystem), Admin (org.apache.hadoop.hbase.client.Admin), HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor), Test (org.junit.Test)
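
Both anonymous filters in this test simply negate a check. A small negating helper makes that intent explicit; this is a hypothetical utility sketched for illustration, not part of the HBase sources:

// Hypothetical helper: inverts an existing PathFilter.
static PathFilter not(final PathFilter filter) {
    return new PathFilter() {
        @Override
        public boolean accept(Path p) {
            return !filter.accept(p);
        }
    };
}

// Usage sketch (Java 8+): list a directory while excluding sequence-id files.
FileStatus[] cf = fs.listStatus(fileStatus[i].getPath(), not(WALSplitter::isSequenceIdFile));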

Example 18 with PathFilter

use of org.apache.hadoop.fs.PathFilter in project hbase by apache.

the class TestWALSplit method getLogForRegion.

private Path[] getLogForRegion(Path rootdir, TableName table, String region) throws IOException {
    Path tdir = FSUtils.getTableDir(rootdir, table);
    @SuppressWarnings("deprecation") Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, region));
    FileStatus[] files = fs.listStatus(editsdir, new PathFilter() {

        @Override
        public boolean accept(Path p) {
            // Keep only real recovered-edits files; skip sequence-id markers.
            return !WALSplitter.isSequenceIdFile(p);
        }
    });
    Path[] paths = new Path[files.length];
    for (int i = 0; i < files.length; i++) {
        paths[i] = files[i].getPath();
    }
    return paths;
}
Also used: Path (org.apache.hadoop.fs.Path), PathFilter (org.apache.hadoop.fs.PathFilter), FileStatus (org.apache.hadoop.fs.FileStatus)
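
On Java 8+ the copy loop at the end of this method collapses to a stream; a minimal equivalent sketch (requires java.util.Arrays):

// Equivalent to the copy loop above: map each FileStatus to its Path.
Path[] paths = Arrays.stream(files)
        .map(FileStatus::getPath)
        .toArray(Path[]::new);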

Example 19 with PathFilter

use of org.apache.hadoop.fs.PathFilter in project ignite by apache.

the class HadoopTeraSortTest method teraValidate.

/**
     * Implements the validation phase of the sample.
     *
     * @throws Exception if the validation job fails.
     */
private void teraValidate() throws Exception {
    System.out.println("TeraValidate ===============================================================");
    getFileSystem().delete(new Path(validateOutDir), true);
    // Run the validation job over the sort output:
    int res = ToolRunner.run(new Configuration(), new TeraValidate(), new String[] { "-Dmapreduce.framework.name=local", sortOutDir, validateOutDir });
    assertEquals(0, res);
    FileStatus[] fileStatuses = getFileSystem().listStatus(new Path(validateOutDir), new PathFilter() {

        @Override
        public boolean accept(Path path) {
            // Typically name is "part-r-00000":
            return path.getName().startsWith("part-r-");
        }
    });
    // TeraValidate has only 1 reduce, so should be only 1 result file:
    assertEquals(1, fileStatuses.length);
    // The result file must contain a single line with the checksum, e.g.:
    // "checksum        7a27e2d0d55de"
    // which is typically about 23 bytes long.
    // If the sort was incorrect, the output instead lists the incorrectly
    // ordered K-V pairs and is therefore much larger.
    long len = fileStatuses[0].getLen();
    assertTrue("TeraValidate length: " + len, len >= 16 && len <= 32);
}
Also used: Path (org.apache.hadoop.fs.Path), PathFilter (org.apache.hadoop.fs.PathFilter), FileStatus (org.apache.hadoop.fs.FileStatus), Configuration (org.apache.hadoop.conf.Configuration), HadoopConfiguration (org.apache.ignite.configuration.HadoopConfiguration), IgniteConfiguration (org.apache.ignite.configuration.IgniteConfiguration), TeraValidate (org.apache.hadoop.examples.terasort.TeraValidate)
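
The "part-r-" prefix check matters because a completed MapReduce output directory also contains marker files such as _SUCCESS, which must not be mistaken for results. A follow-up sketch that reads the single checksum line; the stream-handling code is illustrative, not taken from the Ignite test (needs java.io.BufferedReader, java.io.InputStreamReader, java.nio.charset.StandardCharsets):

// Illustrative only: read the one-line result, e.g. "checksum<TAB>7a27e2d0d55de".
try (BufferedReader reader = new BufferedReader(new InputStreamReader(
        getFileSystem().open(fileStatuses[0].getPath()), StandardCharsets.UTF_8))) {
    String line = reader.readLine();
    assertTrue("unexpected TeraValidate output: " + line,
        line != null && line.startsWith("checksum"));
}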

Example 20 with PathFilter

use of org.apache.hadoop.fs.PathFilter in project lucene-solr by apache.

the class HdfsUpdateLog method getLogList.

public String[] getLogList(Path tlogDir) throws FileNotFoundException, IOException {
    final String prefix = TLOG_NAME + '.';
    FileStatus[] files = fs.listStatus(tlogDir, new PathFilter() {

        @Override
        public boolean accept(Path path) {
            return path.getName().startsWith(prefix);
        }
    });
    List<String> fileList = new ArrayList<>(files.length);
    for (FileStatus file : files) {
        fileList.add(file.getPath().getName());
    }
    return fileList.toArray(new String[0]);
}
Also used: Path (org.apache.hadoop.fs.Path), PathFilter (org.apache.hadoop.fs.PathFilter), FileStatus (org.apache.hadoop.fs.FileStatus), ArrayList (java.util.ArrayList)
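
This example also shows variable capture: the anonymous filter reads the local prefix variable, which is why the method declares it final. On Java 8+ the same filter can be written as a lambda, where prefix must be effectively final; a minimal equivalent sketch:

// Lambda form of the filter above; `prefix` is captured from the enclosing method.
FileStatus[] files = fs.listStatus(tlogDir, p -> p.getName().startsWith(prefix));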

Aggregations

PathFilter (org.apache.hadoop.fs.PathFilter): 43 usages
Path (org.apache.hadoop.fs.Path): 41 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 37 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 18 usages
IOException (java.io.IOException): 16 usages
ArrayList (java.util.ArrayList): 11 usages
Test (org.junit.Test): 8 usages
LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus): 5 usages
InterruptedIOException (java.io.InterruptedIOException): 4 usages
Configuration (org.apache.hadoop.conf.Configuration): 3 usages
Admin (org.apache.hadoop.hbase.client.Admin): 3 usages
Table (org.apache.hadoop.hbase.client.Table): 3 usages
HRegion (org.apache.hadoop.hbase.regionserver.HRegion): 3 usages
ZooKeeperWatcher (org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher): 3 usages
URI (java.net.URI): 2 usages
HashMap (java.util.HashMap): 2 usages
ExecutionException (java.util.concurrent.ExecutionException): 2 usages
Exchange (org.apache.camel.Exchange): 2 usages
Message (org.apache.camel.Message): 2 usages
DefaultMessage (org.apache.camel.impl.DefaultMessage): 2 usages
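
The common thread across these examples: a PathFilter prunes a directory listing at the point of the listStatus call. The same interface also plugs into FileSystem.globStatus; a minimal sketch, with a hypothetical /data path:

// Glob plus filter: match part files under /data while skipping
// hidden and marker entries (names starting with "." or "_").
FileStatus[] parts = fs.globStatus(new Path("/data/part-*"),
        p -> !p.getName().startsWith(".") && !p.getName().startsWith("_"));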