
Example 51 with Put

use of org.apache.hadoop.hbase.client.Put in project hbase by apache.

the class TestForceCacheImportantBlocks method writeTestData.

private void writeTestData(Region region) throws IOException {
    for (int i = 0; i < NUM_ROWS; ++i) {
        Put put = new Put(Bytes.toBytes("row" + i));
        for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
            for (long ts = 1; ts < NUM_TIMESTAMPS_PER_COL; ++ts) {
                put.addColumn(CF_BYTES, Bytes.toBytes("col" + j), ts, Bytes.toBytes("value" + i + "_" + j + "_" + ts));
            }
        }
        region.put(put);
        // flush every ROWS_PER_HFILE rows so the data spans multiple HFiles
        if ((i + 1) % ROWS_PER_HFILE == 0) {
            region.flush(true);
        }
    }
}
Also used : Put(org.apache.hadoop.hbase.client.Put)
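
The loop above writes NUM_TIMESTAMPS_PER_COL versions into each cell, but a plain Get returns only the newest version per column. A minimal read-back sketch, assuming the same Region and constants as the test and a caller that propagates IOException (the row/column names are illustrative):

Get get = new Get(Bytes.toBytes("row0"));
get.addColumn(CF_BYTES, Bytes.toBytes("col0"));
// no-arg setMaxVersions() requests all stored versions rather than only the latest
get.setMaxVersions();
Result result = region.get(get);
for (Cell cell : result.getColumnCells(CF_BYTES, Bytes.toBytes("col0"))) {
    System.out.println(cell.getTimestamp() + " -> " + Bytes.toString(CellUtil.cloneValue(cell)));
}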

Example 52 with Put

use of org.apache.hadoop.hbase.client.Put in project javaee7-samples by javaee-samples.

the class PersonSessionBean method createPerson.

public void createPerson() throws IOException {
    try (HTableInterface table = pool.getTable(personsTable)) {
        Put put = new Put(Bytes.toBytes(person.getName()), Calendar.getInstance().getTime().getTime());
        put.add(Bytes.toBytes(personsColumnFamily), Bytes.toBytes("name"), Calendar.getInstance().getTime().getTime(), Bytes.toBytes(person.getName()));
        put.add(Bytes.toBytes(personsColumnFamily), Bytes.toBytes("age"), Calendar.getInstance().getTime().getTime(), Bytes.toBytes(person.getAge()));
        table.put(put);
    }
}
Also used : HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Put(org.apache.hadoop.hbase.client.Put)
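
HTableInterface and HTablePool were deprecated and later removed from the HBase client. A hedged sketch of the same write against the current Table API, assuming an open Connection named connection (not part of the original sample):

// Table replaces HTableInterface; addColumn replaces the deprecated Put.add
long ts = System.currentTimeMillis();
try (Table table = connection.getTable(TableName.valueOf(personsTable))) {
    Put put = new Put(Bytes.toBytes(person.getName()), ts);
    put.addColumn(Bytes.toBytes(personsColumnFamily), Bytes.toBytes("name"), ts, Bytes.toBytes(person.getName()));
    put.addColumn(Bytes.toBytes(personsColumnFamily), Bytes.toBytes("age"), ts, Bytes.toBytes(person.getAge()));
    table.put(put);
}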

Example 53 with Put

use of org.apache.hadoop.hbase.client.Put in project hadoop by apache.

the class TestHBaseStorageFlowRunCompaction method testWriteNonNumericData.

/** Writes non-numeric data into the flow run table and
   * reads it back.
   *
   * @throws Exception
   */
@Test
public void testWriteNonNumericData() throws Exception {
    String rowKey = "nonNumericRowKey";
    String column = "nonNumericColumnName";
    String value = "nonNumericValue";
    byte[] rowKeyBytes = Bytes.toBytes(rowKey);
    byte[] columnNameBytes = Bytes.toBytes(column);
    byte[] valueBytes = Bytes.toBytes(value);
    Put p = new Put(rowKeyBytes);
    p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnNameBytes, valueBytes);
    Configuration hbaseConf = util.getConfiguration();
    TableName table = TableName.valueOf(hbaseConf.get(FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
    Connection conn = ConnectionFactory.createConnection(hbaseConf);
    Table flowRunTable = conn.getTable(table);
    flowRunTable.put(p);
    Get g = new Get(rowKeyBytes);
    Result r = flowRunTable.get(g);
    assertNotNull(r);
    assertTrue(r.size() >= 1);
    Cell actualValue = r.getColumnLatestCell(FlowRunColumnFamily.INFO.getBytes(), columnNameBytes);
    assertNotNull(CellUtil.cloneValue(actualValue));
    assertEquals(value, Bytes.toString(CellUtil.cloneValue(actualValue)));
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) Configuration(org.apache.hadoop.conf.Configuration) Get(org.apache.hadoop.hbase.client.Get) Connection(org.apache.hadoop.hbase.client.Connection) Cell(org.apache.hadoop.hbase.Cell) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
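
The test leaves conn and flowRunTable open; outside of test teardown both are AutoCloseable and would normally be scoped with try-with-resources. A minimal sketch of the same write/read with explicit cleanup, assuming the hbaseConf, table, p, and rowKeyBytes from above:

try (Connection conn = ConnectionFactory.createConnection(hbaseConf);
     Table flowRunTable = conn.getTable(table)) {
    flowRunTable.put(p);
    Result r = flowRunTable.get(new Get(rowKeyBytes));
    // ... assertions as in the test ...
}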

Example 54 with Put

use of org.apache.hadoop.hbase.client.Put in project hadoop by apache.

the class TestHBaseStorageFlowRunCompaction method testWriteScanBatchLimit.

@Test
public void testWriteScanBatchLimit() throws Exception {
    String rowKey = "nonNumericRowKey";
    String column = "nonNumericColumnName";
    String value = "nonNumericValue";
    String column2 = "nonNumericColumnName2";
    String value2 = "nonNumericValue2";
    String column3 = "nonNumericColumnName3";
    String value3 = "nonNumericValue3";
    String column4 = "nonNumericColumnName4";
    String value4 = "nonNumericValue4";
    byte[] rowKeyBytes = Bytes.toBytes(rowKey);
    byte[] columnNameBytes = Bytes.toBytes(column);
    byte[] valueBytes = Bytes.toBytes(value);
    byte[] columnName2Bytes = Bytes.toBytes(column2);
    byte[] value2Bytes = Bytes.toBytes(value2);
    byte[] columnName3Bytes = Bytes.toBytes(column3);
    byte[] value3Bytes = Bytes.toBytes(value3);
    byte[] columnName4Bytes = Bytes.toBytes(column4);
    byte[] value4Bytes = Bytes.toBytes(value4);
    Put p = new Put(rowKeyBytes);
    p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnNameBytes, valueBytes);
    p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnName2Bytes, value2Bytes);
    p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnName3Bytes, value3Bytes);
    p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnName4Bytes, value4Bytes);
    Configuration hbaseConf = util.getConfiguration();
    TableName table = TableName.valueOf(hbaseConf.get(FlowRunTable.TABLE_NAME_CONF_NAME, FlowRunTable.DEFAULT_TABLE_NAME));
    Connection conn = ConnectionFactory.createConnection(hbaseConf);
    Table flowRunTable = conn.getTable(table);
    flowRunTable.put(p);
    String rowKey2 = "nonNumericRowKey2";
    byte[] rowKey2Bytes = Bytes.toBytes(rowKey2);
    p = new Put(rowKey2Bytes);
    p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnNameBytes, valueBytes);
    p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnName2Bytes, value2Bytes);
    p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnName3Bytes, value3Bytes);
    p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnName4Bytes, value4Bytes);
    flowRunTable.put(p);
    String rowKey3 = "nonNumericRowKey3";
    byte[] rowKey3Bytes = Bytes.toBytes(rowKey3);
    p = new Put(rowKey3Bytes);
    p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnNameBytes, valueBytes);
    p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnName2Bytes, value2Bytes);
    p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnName3Bytes, value3Bytes);
    p.addColumn(FlowRunColumnFamily.INFO.getBytes(), columnName4Bytes, value4Bytes);
    flowRunTable.put(p);
    Scan s = new Scan();
    s.addFamily(FlowRunColumnFamily.INFO.getBytes());
    s.setStartRow(rowKeyBytes);
    // set number of cells to fetch per scanner next invocation
    int batchLimit = 2;
    s.setBatch(batchLimit);
    ResultScanner scanner = flowRunTable.getScanner(s);
    for (Result result : scanner) {
        assertNotNull(result);
        assertTrue(!result.isEmpty());
        assertTrue(result.rawCells().length <= batchLimit);
        Map<byte[], byte[]> values = result.getFamilyMap(FlowRunColumnFamily.INFO.getBytes());
        assertTrue(values.size() <= batchLimit);
    }
    s = new Scan();
    s.addFamily(FlowRunColumnFamily.INFO.getBytes());
    s.setStartRow(rowKeyBytes);
    // set number of cells to fetch per scanner next invocation
    batchLimit = 3;
    s.setBatch(batchLimit);
    scanner = flowRunTable.getScanner(s);
    for (Result result : scanner) {
        assertNotNull(result);
        assertTrue(!result.isEmpty());
        assertTrue(result.rawCells().length <= batchLimit);
        Map<byte[], byte[]> values = result.getFamilyMap(FlowRunColumnFamily.INFO.getBytes());
        assertTrue(values.size() <= batchLimit);
    }
    s = new Scan();
    s.addFamily(FlowRunColumnFamily.INFO.getBytes());
    s.setStartRow(rowKeyBytes);
    // set number of cells to fetch per scanner next invocation
    batchLimit = 1000;
    s.setBatch(batchLimit);
    scanner = flowRunTable.getScanner(s);
    int rowCount = 0;
    for (Result result : scanner) {
        assertNotNull(result);
        assertTrue(!result.isEmpty());
        assertTrue(result.rawCells().length <= batchLimit);
        Map<byte[], byte[]> values = result.getFamilyMap(FlowRunColumnFamily.INFO.getBytes());
        assertTrue(values.size() <= batchLimit);
        // we expect all back in one next call
        assertEquals(4, values.size());
        rowCount++;
    }
    // should get back 1 row with each invocation
    // if scan batch is set sufficiently high
    assertEquals(3, rowCount);
    // test with a negative number
    // should have same effect as setting it to a high number
    s = new Scan();
    s.addFamily(FlowRunColumnFamily.INFO.getBytes());
    s.setStartRow(rowKeyBytes);
    // set number of cells to fetch per scanner next invocation
    batchLimit = -2992;
    s.setBatch(batchLimit);
    scanner = flowRunTable.getScanner(s);
    rowCount = 0;
    for (Result result : scanner) {
        assertNotNull(result);
        assertTrue(!result.isEmpty());
        assertEquals(4, result.rawCells().length);
        Map<byte[], byte[]> values = result.getFamilyMap(FlowRunColumnFamily.INFO.getBytes());
        // we expect all back in one next call
        assertEquals(4, values.size());
        System.out.println(" values size " + values.size() + " " + batchLimit);
        rowCount++;
    }
    // should get back 1 row with each invocation
    // if scan batch is set sufficiently high
    assertEquals(3, rowCount);
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Configuration(org.apache.hadoop.conf.Configuration) Connection(org.apache.hadoop.hbase.client.Connection) Scan(org.apache.hadoop.hbase.client.Scan) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)
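
Condensed, the behavior the test verifies: Scan.setBatch(n) caps the number of cells returned per Result, so a row wider than n cells comes back as several partial Results, while a non-positive or sufficiently large batch yields one Result per row. A minimal sketch, assuming an open Table named flowRunTable and the same column family:

Scan scan = new Scan();
scan.addFamily(FlowRunColumnFamily.INFO.getBytes());
// at most 2 cells per Result; each 4-cell row above arrives as 2 partial Results
scan.setBatch(2);
try (ResultScanner rs = flowRunTable.getScanner(scan)) {
    for (Result partial : rs) {
        System.out.println(Bytes.toString(partial.getRow()) + ": " + partial.rawCells().length + " cells");
    }
}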

Example 55 with Put

use of org.apache.hadoop.hbase.client.Put in project hadoop by apache.

the class ColumnHelper method store.

/**
   * Sends a Mutation to the table. The mutations will be buffered and sent over
   * the wire as part of a batch.
   *
   * @param rowKey
   *          identifying the row to write. Nothing gets written when null.
   * @param tableMutator
   *          used to modify the underlying HBase table
   * @param columnQualifier
   *          column qualifier. Nothing gets written when null.
   * @param timestamp
   *          version timestamp. When null, the current timestamp multiplied by
   *          TimestampGenerator.TS_MULTIPLIER, with the last 3 digits of the
   *          app id added, is used.
   * @param inputValue
   *          the value to write to the rowKey and column qualifier. Nothing
   *          gets written when null.
   * @param attributes Attributes to be set for HBase Put.
   * @throws IOException if any problem occurs during the store operation
   *          (sending the mutation to the table).
   */
public void store(byte[] rowKey, TypedBufferedMutator<?> tableMutator, byte[] columnQualifier, Long timestamp, Object inputValue, Attribute... attributes) throws IOException {
    if ((rowKey == null) || (columnQualifier == null) || (inputValue == null)) {
        return;
    }
    Put p = new Put(rowKey);
    timestamp = getPutTimestamp(timestamp, attributes);
    p.addColumn(columnFamilyBytes, columnQualifier, timestamp, converter.encodeValue(inputValue));
    if ((attributes != null) && (attributes.length > 0)) {
        for (Attribute attribute : attributes) {
            p.setAttribute(attribute.getName(), attribute.getValue());
        }
    }
    tableMutator.mutate(p);
}
Also used : Attribute(org.apache.hadoop.yarn.server.timelineservice.storage.flow.Attribute) Put(org.apache.hadoop.hbase.client.Put)
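
A hedged usage sketch: because the mutator buffers, the Put reaches HBase only once the underlying BufferedMutator flushes. The helper instance, mutator, and values below are illustrative assumptions, not from the source:

// hypothetical ColumnHelper and TypedBufferedMutator instances
columnHelper.store(Bytes.toBytes("flow!run!key"), // row key
    tableMutator,                                 // TypedBufferedMutator for the target table
    Bytes.toBytes("metric"),                      // column qualifier
    null,                                         // null timestamp: generated as described in the javadoc
    Long.valueOf(42L));                           // value, encoded by the helper's converter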

Aggregations

Put (org.apache.hadoop.hbase.client.Put) 1416
Test (org.junit.Test) 672
Table (org.apache.hadoop.hbase.client.Table) 489
ArrayList (java.util.ArrayList) 317
Result (org.apache.hadoop.hbase.client.Result) 279
TableName (org.apache.hadoop.hbase.TableName) 257
IOException (java.io.IOException) 241
Delete (org.apache.hadoop.hbase.client.Delete) 225
Scan (org.apache.hadoop.hbase.client.Scan) 222
Cell (org.apache.hadoop.hbase.Cell) 200
Get (org.apache.hadoop.hbase.client.Get) 196
Configuration (org.apache.hadoop.conf.Configuration) 148
TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor) 139
Connection (org.apache.hadoop.hbase.client.Connection) 122
KeyValue (org.apache.hadoop.hbase.KeyValue) 112
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner) 110
Admin (org.apache.hadoop.hbase.client.Admin) 89
List (java.util.List) 83
Mutation (org.apache.hadoop.hbase.client.Mutation) 82
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo) 80