Example 61 with Connection

use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.

the class TestVisibilityLabelsWithDeletes method addLabels.

public static void addLabels() throws Exception {
    PrivilegedExceptionAction<VisibilityLabelsResponse> action = new PrivilegedExceptionAction<VisibilityLabelsResponse>() {

        @Override
        public VisibilityLabelsResponse run() throws Exception {
            String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE };
            try (Connection conn = ConnectionFactory.createConnection(conf)) {
                VisibilityClient.addLabels(conn, labels);
            } catch (Throwable t) {
                throw new IOException(t);
            }
            return null;
        }
    };
    SUPERUSER.runAs(action);
}
Also used : Connection(org.apache.hadoop.hbase.client.Connection) PrivilegedExceptionAction(java.security.PrivilegedExceptionAction) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) VisibilityLabelsResponse(org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsResponse)

Example 62 with Connection

use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.

the class TestVisibilityLabelsWithDeletes method testVisibilityLabelsWithDeleteColumnWithSpecificVersionWithPutsReAppearing.

@Test
public void testVisibilityLabelsWithDeleteColumnWithSpecificVersionWithPutsReAppearing() throws Exception {
    final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
    Admin hBaseAdmin = TEST_UTIL.getAdmin();
    HColumnDescriptor colDesc = new HColumnDescriptor(fam);
    colDesc.setMaxVersions(5);
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(colDesc);
    hBaseAdmin.createTable(desc);
    try (Table table = TEST_UTIL.getConnection().getTable(tableName)) {
        Put put1 = new Put(Bytes.toBytes("row1"));
        put1.addColumn(fam, qual, 123L, value);
        put1.setCellVisibility(new CellVisibility(CONFIDENTIAL));
        Put put2 = new Put(Bytes.toBytes("row1"));
        put2.addColumn(fam, qual, 123L, value1);
        put2.setCellVisibility(new CellVisibility(SECRET));
        table.put(createList(put1, put2));
        Scan s = new Scan();
        s.setMaxVersions(5);
        s.setAuthorizations(new Authorizations(CONFIDENTIAL, SECRET));
        ResultScanner scanner = table.getScanner(s);
        assertEquals(1, scanner.next(3).length);
        scanner.close();
        PrivilegedExceptionAction<Void> actiona = new PrivilegedExceptionAction<Void>() {

            @Override
            public Void run() throws Exception {
                try (Connection connection = ConnectionFactory.createConnection(conf);
                    Table table = connection.getTable(tableName)) {
                    Delete d = new Delete(row1);
                    d.setCellVisibility(new CellVisibility(CONFIDENTIAL));
                    d.addColumn(fam, qual, 123L);
                    table.delete(d);
                }
                try (Connection connection = ConnectionFactory.createConnection(conf);
                    Table table = connection.getTable(tableName)) {
                    Delete d = new Delete(row1);
                    d.setCellVisibility(new CellVisibility(SECRET));
                    d.addColumn(fam, qual, 123L);
                    table.delete(d);
                } catch (Throwable t) {
                    throw new IOException(t);
                }
                return null;
            }
        };
        SUPERUSER.runAs(actiona);
        s = new Scan();
        s.setMaxVersions(5);
        s.setAuthorizations(new Authorizations(CONFIDENTIAL));
        scanner = table.getScanner(s);
        assertEquals(0, scanner.next(3).length);
        scanner.close();
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) Connection(org.apache.hadoop.hbase.client.Connection) PrivilegedExceptionAction(java.security.PrivilegedExceptionAction) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) Admin(org.apache.hadoop.hbase.client.Admin) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) TableName(org.apache.hadoop.hbase.TableName) Scan(org.apache.hadoop.hbase.client.Scan) Test(org.junit.Test)
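
The createList call above is a small helper defined elsewhere in TestVisibilityLabelsWithDeletes and not shown in this snippet. A minimal sketch of what it could look like (an assumption for illustration, assuming java.util.Arrays and java.util.ArrayList are imported; the project's actual helper may differ):

// Hypothetical sketch of the createList helper used with table.put(...) above:
// it simply wraps the given Puts in a List so they can be submitted in one batch call.
private static List<Put> createList(Put... puts) {
    return new ArrayList<>(Arrays.asList(puts));
}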

Example 63 with Connection

use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.

the class TestHFileOutputFormat2 method testExcludeAllFromMinorCompaction.

/**
   * Tests the scenario described in HBASE-6901: all files are bulk loaded and
   * excluded from minor compaction. Without the fix for HBASE-6901, an
   * ArrayIndexOutOfBoundsException would be thrown.
   */
@Ignore("Flakey: See HBASE-9051")
@Test
public void testExcludeAllFromMinorCompaction() throws Exception {
    Configuration conf = util.getConfiguration();
    conf.setInt("hbase.hstore.compaction.min", 2);
    generateRandomStartKeys(5);
    util.startMiniCluster();
    try (Connection conn = ConnectionFactory.createConnection();
        Admin admin = conn.getAdmin();
        Table table = util.createTable(TABLE_NAME, FAMILIES);
        RegionLocator locator = conn.getRegionLocator(TABLE_NAME)) {
        final FileSystem fs = util.getDFSCluster().getFileSystem();
        assertEquals("Should start with empty table", 0, util.countRows(table));
        // deep inspection: get the StoreFile dir
        final Path storePath = new Path(FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME), new Path(admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(), Bytes.toString(FAMILIES[0])));
        assertEquals(0, fs.listStatus(storePath).length);
        // Generate two bulk load files
        conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true);
        for (int i = 0; i < 2; i++) {
            Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
            runIncrementalPELoad(conf, table.getTableDescriptor(), conn.getRegionLocator(TABLE_NAME), testDir, false);
            // Perform the actual load
            new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, locator);
        }
        // Ensure data shows up
        int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
        assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows, util.countRows(table));
        // should have a second StoreFile now
        assertEquals(2, fs.listStatus(storePath).length);
        // minor compactions shouldn't get rid of the file
        admin.compact(TABLE_NAME);
        try {
            quickPoll(new Callable<Boolean>() {

                @Override
                public Boolean call() throws Exception {
                    List<HRegion> regions = util.getMiniHBaseCluster().getRegions(TABLE_NAME);
                    for (HRegion region : regions) {
                        for (Store store : region.getStores()) {
                            store.closeAndArchiveCompactedFiles();
                        }
                    }
                    return fs.listStatus(storePath).length == 1;
                }
            }, 5000);
            throw new IOException("SF# = " + fs.listStatus(storePath).length);
        } catch (AssertionError ae) {
            // expected behavior: minor compaction must not remove the bulk-loaded files, so the poll times out
        }
        // a major compaction should work though
        admin.majorCompact(TABLE_NAME);
        quickPoll(new Callable<Boolean>() {

            @Override
            public Boolean call() throws Exception {
                List<HRegion> regions = util.getMiniHBaseCluster().getRegions(TABLE_NAME);
                for (HRegion region : regions) {
                    for (Store store : region.getStores()) {
                        store.closeAndArchiveCompactedFiles();
                    }
                }
                return fs.listStatus(storePath).length == 1;
            }
        }, 5000);
    } finally {
        util.shutdownMiniCluster();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Connection(org.apache.hadoop.hbase.client.Connection) Store(org.apache.hadoop.hbase.regionserver.Store) IOException(java.io.IOException) Admin(org.apache.hadoop.hbase.client.Admin) UnsupportedEncodingException(java.io.UnsupportedEncodingException) IOException(java.io.IOException) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) List(java.util.List) ArrayList(java.util.ArrayList) Ignore(org.junit.Ignore) Test(org.junit.Test)
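
quickPoll and runIncrementalPELoad are helpers defined elsewhere in TestHFileOutputFormat2. The AssertionError handling above only makes sense once you know that quickPoll fails if its condition never becomes true; a minimal sketch of such a poll-until-true helper follows (the retry interval and message are assumptions, not the project's actual implementation):

// Hypothetical sketch of a poll-until-true helper with the shape quickPoll(Callable<Boolean>, int).
// It re-evaluates the condition until it returns true or the timeout elapses, then throws
// AssertionError, which is why the minor-compaction check above expects to catch one.
private void quickPoll(Callable<Boolean> callable, int waitMs) throws Exception {
    int sleepMs = 10;
    int retries = waitMs / sleepMs;
    while (retries-- > 0) {
        if (callable.call()) {
            return;
        }
        Thread.sleep(sleepMs);
    }
    throw new AssertionError("Timed out waiting for condition to become true");
}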

Example 64 with Connection

use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.

the class TestHFileOutputFormat2 method testExcludeMinorCompaction.

@Ignore("Goes zombie too frequently; needs work. See HBASE-14563")
@Test
public void testExcludeMinorCompaction() throws Exception {
    Configuration conf = util.getConfiguration();
    conf.setInt("hbase.hstore.compaction.min", 2);
    generateRandomStartKeys(5);
    util.startMiniCluster();
    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
        Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
        final FileSystem fs = util.getDFSCluster().getFileSystem();
        Table table = util.createTable(TABLE_NAME, FAMILIES);
        assertEquals("Should start with empty table", 0, util.countRows(table));
        // deep inspection: get the StoreFile dir
        final Path storePath = new Path(FSUtils.getTableDir(FSUtils.getRootDir(conf), TABLE_NAME), new Path(admin.getTableRegions(TABLE_NAME).get(0).getEncodedName(), Bytes.toString(FAMILIES[0])));
        assertEquals(0, fs.listStatus(storePath).length);
        // put some data in it and flush to create a storefile
        Put p = new Put(Bytes.toBytes("test"));
        p.addColumn(FAMILIES[0], Bytes.toBytes("1"), Bytes.toBytes("1"));
        table.put(p);
        admin.flush(TABLE_NAME);
        assertEquals(1, util.countRows(table));
        quickPoll(new Callable<Boolean>() {

            @Override
            public Boolean call() throws Exception {
                return fs.listStatus(storePath).length == 1;
            }
        }, 5000);
        // Generate a bulk load file with more rows
        conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true);
        RegionLocator regionLocator = conn.getRegionLocator(TABLE_NAME);
        runIncrementalPELoad(conf, table.getTableDescriptor(), regionLocator, testDir, false);
        // Perform the actual load
        new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, regionLocator);
        // Ensure data shows up
        int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT;
        assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows + 1, util.countRows(table));
        // should have a second StoreFile now
        assertEquals(2, fs.listStatus(storePath).length);
        // minor compactions shouldn't get rid of the file
        admin.compact(TABLE_NAME);
        try {
            quickPoll(new Callable<Boolean>() {

                @Override
                public Boolean call() throws Exception {
                    return fs.listStatus(storePath).length == 1;
                }
            }, 5000);
            throw new IOException("SF# = " + fs.listStatus(storePath).length);
        } catch (AssertionError ae) {
            // expected behavior: minor compaction must not remove the bulk-loaded file, so the poll times out
        }
        // a major compaction should work though
        admin.majorCompact(TABLE_NAME);
        quickPoll(new Callable<Boolean>() {

            @Override
            public Boolean call() throws Exception {
                return fs.listStatus(storePath).length == 1;
            }
        }, 5000);
    } finally {
        util.shutdownMiniCluster();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) Configuration(org.apache.hadoop.conf.Configuration) HBaseConfiguration(org.apache.hadoop.hbase.HBaseConfiguration) Connection(org.apache.hadoop.hbase.client.Connection) IOException(java.io.IOException) Admin(org.apache.hadoop.hbase.client.Admin) Put(org.apache.hadoop.hbase.client.Put) UnsupportedEncodingException(java.io.UnsupportedEncodingException) IOException(java.io.IOException) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Ignore(org.junit.Ignore) Test(org.junit.Test)

Example 65 with Connection

use of org.apache.hadoop.hbase.client.Connection in project hbase by apache.

the class TestLoadIncrementalHFilesSplitRecovery method testSplitTmpFileCleanUp.

/**
   * This test creates a table with many small regions. The bulk load files
   * have to be split multiple times before all of them can be loaded successfully.
   */
@Test(timeout = 120000)
public void testSplitTmpFileCleanUp() throws Exception {
    final TableName table = TableName.valueOf(name.getMethodName());
    byte[][] SPLIT_KEYS = new byte[][] { Bytes.toBytes("row_00000010"), Bytes.toBytes("row_00000020"), Bytes.toBytes("row_00000030"), Bytes.toBytes("row_00000040"), Bytes.toBytes("row_00000050") };
    try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
        setupTableWithSplitkeys(table, 10, SPLIT_KEYS);
        LoadIncrementalHFiles lih = new LoadIncrementalHFiles(util.getConfiguration());
        // create HFiles
        Path bulk = buildBulkFiles(table, 2);
        try (Table t = connection.getTable(table);
            RegionLocator locator = connection.getRegionLocator(table);
            Admin admin = connection.getAdmin()) {
            lih.doBulkLoad(bulk, admin, t, locator);
        }
        // family path
        Path tmpPath = new Path(bulk, family(0));
        // TMP_DIR under family path
        tmpPath = new Path(tmpPath, LoadIncrementalHFiles.TMP_DIR);
        FileSystem fs = bulk.getFileSystem(util.getConfiguration());
        // HFiles have been split, so the TMP_DIR exists
        assertTrue(fs.exists(tmpPath));
        // TMP_DIR should have been cleaned-up
        assertNull(LoadIncrementalHFiles.TMP_DIR + " should be empty.", FSUtils.listStatus(fs, tmpPath));
        assertExpectedTable(connection, table, ROWCOUNT, 2);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) TableName(org.apache.hadoop.hbase.TableName) RegionLocator(org.apache.hadoop.hbase.client.RegionLocator) Table(org.apache.hadoop.hbase.client.Table) FileSystem(org.apache.hadoop.fs.FileSystem) ClusterConnection(org.apache.hadoop.hbase.client.ClusterConnection) Connection(org.apache.hadoop.hbase.client.Connection) Admin(org.apache.hadoop.hbase.client.Admin) Test(org.junit.Test)
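
setupTableWithSplitkeys, buildBulkFiles, family and assertExpectedTable are helpers defined elsewhere in TestLoadIncrementalHFilesSplitRecovery. A minimal sketch of what a setup helper like setupTableWithSplitkeys could look like (an assumption for illustration; the real helper may differ):

// Hypothetical sketch of setupTableWithSplitkeys(TableName, int, byte[][]):
// creates a table with cfCount column families, pre-split on the given keys, so that
// bulk-load HFiles spanning several regions must be split before they can be loaded.
private void setupTableWithSplitkeys(TableName table, int cfCount, byte[][] splitKeys) throws Exception {
    HTableDescriptor desc = new HTableDescriptor(table);
    for (int i = 0; i < cfCount; i++) {
        // family(i) is assumed to return the name of the i-th column family
        desc.addFamily(new HColumnDescriptor(family(i)));
    }
    util.getAdmin().createTable(desc, splitKeys);
}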

Aggregations

Connection (org.apache.hadoop.hbase.client.Connection): 307 usages
Table (org.apache.hadoop.hbase.client.Table): 194
Test (org.junit.Test): 174
IOException (java.io.IOException): 117
TableName (org.apache.hadoop.hbase.TableName): 103
Result (org.apache.hadoop.hbase.client.Result): 102
Admin (org.apache.hadoop.hbase.client.Admin): 90
Scan (org.apache.hadoop.hbase.client.Scan): 81
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 77
PrivilegedExceptionAction (java.security.PrivilegedExceptionAction): 71
Put (org.apache.hadoop.hbase.client.Put): 68
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 58
Delete (org.apache.hadoop.hbase.client.Delete): 55
Configuration (org.apache.hadoop.conf.Configuration): 54
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 52
Get (org.apache.hadoop.hbase.client.Get): 48
InterruptedIOException (java.io.InterruptedIOException): 45
Cell (org.apache.hadoop.hbase.Cell): 41
CellScanner (org.apache.hadoop.hbase.CellScanner): 34
ArrayList (java.util.ArrayList): 26
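
All of the examples above share the same Connection lifecycle: build a Configuration, open a Connection through ConnectionFactory, obtain lightweight Table, Admin, or RegionLocator instances from it, and close everything with try-with-resources. A minimal standalone sketch of that pattern (table, family, and qualifier names below are placeholders, not taken from the examples):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionUsageSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Connection is heavyweight and thread-safe; Table instances obtained from it are cheap
        // and should be closed after use, which try-with-resources handles here.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("example_table"))) { // placeholder table name
            Result result = table.get(new Get(Bytes.toBytes("row1")));
            byte[] value = result.getValue(Bytes.toBytes("f"), Bytes.toBytes("q")); // placeholder family/qualifier
            System.out.println(value == null ? "no value" : Bytes.toString(value));
        }
    }
}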