
Example 1 with BufferedMutator

Use of org.apache.hadoop.hbase.client.BufferedMutator in project hadoop by apache.

From the class BaseTable, the method getTableMutator:

/**
   * Used to create a type-safe mutator for this table.
   *
   * @param hbaseConf the configuration used to read the table name from.
   * @param conn the connection used to create the mutator from.
   * @return a type safe {@link BufferedMutator} for the entity table.
   * @throws IOException if any exception occurs while creating mutator for the
   *     table.
   */
public TypedBufferedMutator<T> getTableMutator(Configuration hbaseConf, Connection conn) throws IOException {
    TableName tableName = this.getTableName(hbaseConf);
    // Plain buffered mutator
    BufferedMutator bufferedMutator = conn.getBufferedMutator(tableName);
    // Now make this thing type safe.
    // Service initialization should hold on to this variable with the proper type.
    TypedBufferedMutator<T> table = new BufferedMutatorDelegator<T>(bufferedMutator);
    return table;
}
Also used : TableName(org.apache.hadoop.hbase.TableName) BufferedMutator(org.apache.hadoop.hbase.client.BufferedMutator)
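
The mutator returned here is typically held by the writer service for the lifetime of its writes. A minimal caller sketch, assuming EntityTable is one of the BaseTable subclasses and that TypedBufferedMutator exposes the usual BufferedMutator operations (the variable, row-key, and column names below are illustrative, not from the Hadoop source):

TypedBufferedMutator<EntityTable> entityMutator = new EntityTable().getTableMutator(hbaseConf, conn);
try {
    Put p = new Put(Bytes.toBytes("cluster!user!flow!run!entity"));
    p.addColumn(Bytes.toBytes("i"), Bytes.toBytes("created_time"), Bytes.toBytes(System.currentTimeMillis()));
    // mutations are buffered on the client until flush() or close()
    entityMutator.mutate(p);
    entityMutator.flush();
} finally {
    entityMutator.close();
}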

Example 2 with BufferedMutator

Use of org.apache.hadoop.hbase.client.BufferedMutator in project hbase by apache.

From the class IntegrationTestSendTraceRequests, the method insertData:

private LinkedBlockingQueue<Long> insertData() throws IOException, InterruptedException {
    LinkedBlockingQueue<Long> rowKeys = new LinkedBlockingQueue<>(25000);
    BufferedMutator ht = util.getConnection().getBufferedMutator(this.tableName);
    byte[] value = new byte[300];
    for (int x = 0; x < 5000; x++) {
        TraceScope traceScope = Trace.startSpan("insertData", Sampler.ALWAYS);
        try {
            for (int i = 0; i < 5; i++) {
                long rk = random.nextLong();
                rowKeys.add(rk);
                Put p = new Put(Bytes.toBytes(rk));
                for (int y = 0; y < 10; y++) {
                    random.nextBytes(value);
                    p.addColumn(familyName, Bytes.toBytes(random.nextLong()), value);
                }
                ht.mutate(p);
            }
            if ((x % 1000) == 0) {
                admin.flush(tableName);
            }
        } finally {
            traceScope.close();
        }
    }
    admin.flush(tableName);
    return rowKeys;
}
Also used : BufferedMutator(org.apache.hadoop.hbase.client.BufferedMutator) TraceScope(org.apache.htrace.TraceScope) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Put(org.apache.hadoop.hbase.client.Put)
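
Note that admin.flush(tableName) flushes the table's memstores on the region servers; the BufferedMutator itself buffers mutations on the client until its write buffer fills or it is flushed or closed. The snippet never flushes or closes ht explicitly, so a cleanup step like the following (a sketch reusing the same variable) would normally follow the loop:

// push any mutations still sitting in the client-side write buffer, then release resources
ht.flush();
ht.close();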

Example 3 with BufferedMutator

Use of org.apache.hadoop.hbase.client.BufferedMutator in project hbase by apache.

From the class TestMobCompactor, the method testMajorCompactionFromAdmin:

@Test(timeout = 300000)
public void testMajorCompactionFromAdmin() throws Exception {
    resetConf();
    int mergeSize = 5000;
    // change the mob compaction merge size
    conf.setLong(MobConstants.MOB_COMPACTION_MERGEABLE_THRESHOLD, mergeSize);
    SecureRandom rng = new SecureRandom();
    byte[] keyBytes = new byte[AES.KEY_LENGTH];
    rng.nextBytes(keyBytes);
    String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
    Key cfKey = new SecretKeySpec(keyBytes, algorithm);
    byte[] encryptionKey = EncryptionUtil.wrapKey(conf, conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()), cfKey);
    final TableName tableName = TableName.valueOf(name.getMethodName());
    HTableDescriptor desc = new HTableDescriptor(tableName);
    HColumnDescriptor hcd1 = new HColumnDescriptor(family1);
    hcd1.setMobEnabled(true);
    hcd1.setMobThreshold(0);
    hcd1.setEncryptionType(algorithm);
    hcd1.setEncryptionKey(encryptionKey);
    HColumnDescriptor hcd2 = new HColumnDescriptor(family2);
    hcd2.setMobEnabled(true);
    hcd2.setMobThreshold(0);
    desc.addFamily(hcd1);
    desc.addFamily(hcd2);
    admin.createTable(desc, getSplitKeys());
    Table table = conn.getTable(tableName);
    BufferedMutator bufMut = conn.getBufferedMutator(tableName);
    int count = 4;
    // generate mob files
    loadData(admin, bufMut, tableName, count, rowNumPerFile);
    int rowNumPerRegion = count * rowNumPerFile;
    assertEquals("Before deleting: mob rows count", regionNum * rowNumPerRegion, countMobRows(table));
    assertEquals("Before deleting: mob cells count", regionNum * cellNumPerRow * rowNumPerRegion, countMobCells(table));
    assertEquals("Before deleting: mob file count", regionNum * count, countFiles(tableName, true, family1));
    createDelFile(table, tableName, Bytes.toBytes(family1), Bytes.toBytes(qf1));
    assertEquals("Before compaction: mob rows count", regionNum * (rowNumPerRegion - delRowNum), countMobRows(table));
    assertEquals("Before compaction: mob cells count", regionNum * (cellNumPerRow * rowNumPerRegion - delCellNum), countMobCells(table));
    assertEquals("Before compaction: family1 mob file count", regionNum * count, countFiles(tableName, true, family1));
    assertEquals("Before compaction: family2 mob file count", regionNum * count, countFiles(tableName, true, family2));
    assertEquals("Before compaction: family1 del file count", regionNum, countFiles(tableName, false, family1));
    assertEquals("Before compaction: family2 del file count", regionNum, countFiles(tableName, false, family2));
    // do the major MOB compaction; it forces all files of the family to be compacted
    admin.majorCompact(tableName, hcd1.getName(), CompactType.MOB);
    waitUntilMobCompactionFinished(tableName);
    assertEquals("After compaction: mob rows count", regionNum * (rowNumPerRegion - delRowNum), countMobRows(table));
    assertEquals("After compaction: mob cells count", regionNum * (cellNumPerRow * rowNumPerRegion - delCellNum), countMobCells(table));
    assertEquals("After compaction: family1 mob file count", regionNum, countFiles(tableName, true, family1));
    assertEquals("After compaction: family2 mob file count", regionNum * count, countFiles(tableName, true, family2));
    assertEquals("After compaction: family1 del file count", 0, countFiles(tableName, false, family1));
    assertEquals("After compaction: family2 del file count", regionNum, countFiles(tableName, false, family2));
    Assert.assertTrue(verifyEncryption(tableName, family1));
    table.close();
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) BufferedMutator(org.apache.hadoop.hbase.client.BufferedMutator) SecretKeySpec(javax.crypto.spec.SecretKeySpec) SecureRandom(java.security.SecureRandom) Key(java.security.Key) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor) Test(org.junit.Test)
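
loadData is a helper defined elsewhere in TestMobCompactor; the snippet only shows it receiving the BufferedMutator. A hypothetical sketch of how such a helper might drive the mutator (the signature, row keys, and column names are assumptions, not the actual test code):

private static void loadData(Admin admin, BufferedMutator mutator, TableName tableName,
                             int fileCount, int rowsPerFile) throws IOException {
    for (int f = 0; f < fileCount; f++) {
        for (int r = 0; r < rowsPerFile; r++) {
            Put put = new Put(Bytes.toBytes("row-" + f + "-" + r));
            // with a MOB threshold of 0, every value is written as a MOB cell
            put.addColumn(Bytes.toBytes("family1"), Bytes.toBytes("qf1"), Bytes.toBytes("mob-value-" + r));
            mutator.mutate(put);
        }
        mutator.flush();
        // flush the memstores so each batch ends up in its own set of files
        admin.flush(tableName);
    }
}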

Example 4 with BufferedMutator

Use of org.apache.hadoop.hbase.client.BufferedMutator in project beam by apache.

From the class HBaseIOTest, the method writeData:

/**
     * Helper function that writes numRows rows of test data to the given table.
     */
private static void writeData(String tableId, int numRows) throws Exception {
    Connection connection = admin.getConnection();
    TableName tableName = TableName.valueOf(tableId);
    BufferedMutator mutator = connection.getBufferedMutator(tableName);
    List<Mutation> mutations = makeTableData(numRows);
    mutator.mutate(mutations);
    mutator.flush();
    mutator.close();
}
Also used : TableName(org.apache.hadoop.hbase.TableName) BufferedMutator(org.apache.hadoop.hbase.client.BufferedMutator) Connection(org.apache.hadoop.hbase.client.Connection) Mutation(org.apache.hadoop.hbase.client.Mutation)
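
Because BufferedMutator implements Closeable and close() flushes any mutations still buffered, the same helper can be written with try-with-resources, making the explicit flush() optional (a sketch, not the Beam code):

try (BufferedMutator mutator = connection.getBufferedMutator(tableName)) {
    // mutate(List<? extends Mutation>) buffers the whole batch client-side
    mutator.mutate(makeTableData(numRows));
}
// close() has flushed anything that was still buffered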

Example 5 with BufferedMutator

Use of org.apache.hadoop.hbase.client.BufferedMutator in project drill by apache.

From the class TestTableGenerator, the method generateHBaseDataset2:

public static void generateHBaseDataset2(Connection conn, Admin admin, TableName tableName, int numberRegions) throws Exception {
    if (admin.tableExists(tableName)) {
        admin.disableTable(tableName);
        admin.deleteTable(tableName);
    }
    HTableDescriptor desc = new HTableDescriptor(tableName);
    desc.addFamily(new HColumnDescriptor("f"));
    if (numberRegions > 1) {
        admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
    } else {
        admin.createTable(desc);
    }
    BufferedMutator table = conn.getBufferedMutator(tableName);
    int rowCount = 0;
    byte[] bytes = null;
    final int numColumns = 5;
    Random random = new Random();
    int iteration = 0;
    while (rowCount < 1000) {
        char rowKeyChar = 'a';
        for (int i = 0; i < numberRegions; i++) {
            Put p = new Put(("" + rowKeyChar + iteration).getBytes());
            for (int j = 1; j <= numColumns; j++) {
                bytes = new byte[5000];
                random.nextBytes(bytes);
                p.addColumn("f".getBytes(), ("c" + j).getBytes(), bytes);
            }
            table.mutate(p);
            ++rowKeyChar;
            ++rowCount;
        }
        ++iteration;
    }
    table.close();
    admin.flush(tableName);
}
Also used : Random(java.util.Random) HColumnDescriptor(org.apache.hadoop.hbase.HColumnDescriptor) BufferedMutator(org.apache.hadoop.hbase.client.BufferedMutator) Put(org.apache.hadoop.hbase.client.Put) HTableDescriptor(org.apache.hadoop.hbase.HTableDescriptor)
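
With five 5000-byte values per Put, the default client write buffer (2 MB via hbase.client.write.buffer) fills quickly and triggers background flushes. If the generator needed a larger buffer or explicit failure handling, the mutator could be created from BufferedMutatorParams instead; a sketch with an illustrative 8 MB buffer:

BufferedMutatorParams params = new BufferedMutatorParams(tableName)
    .writeBufferSize(8L * 1024 * 1024)  // illustrative size, not from the Drill test
    .listener((exception, mutator) -> {
        // invoked once retries are exhausted; rethrow so the test fails loudly
        throw exception;
    });
BufferedMutator table = conn.getBufferedMutator(params);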

Aggregations

BufferedMutator (org.apache.hadoop.hbase.client.BufferedMutator): 25
Put (org.apache.hadoop.hbase.client.Put): 19
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 16
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 16
PositionedByteRange (org.apache.hadoop.hbase.util.PositionedByteRange): 8
SimplePositionedMutableByteRange (org.apache.hadoop.hbase.util.SimplePositionedMutableByteRange): 8
TableName (org.apache.hadoop.hbase.TableName): 3
Connection (org.apache.hadoop.hbase.client.Connection): 3
Table (org.apache.hadoop.hbase.client.Table): 3
ArrayList (java.util.ArrayList): 2
Random (java.util.Random): 2
Mutation (org.apache.hadoop.hbase.client.Mutation): 2
IOException (java.io.IOException): 1
Key (java.security.Key): 1
SecureRandom (java.security.SecureRandom): 1
Date (java.util.Date): 1
Iterator (java.util.Iterator): 1
Callable (java.util.concurrent.Callable): 1
ExecutorService (java.util.concurrent.ExecutorService): 1
Future (java.util.concurrent.Future): 1