Example 41 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

From class DynamicFamilyIT, method initTableValues:

@SuppressWarnings("deprecation")
private static void initTableValues() throws Exception {
    ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
    HTableInterface hTable = services.getTable(SchemaUtil.getTableNameAsBytes(WEB_STATS_SCHEMA_NAME, WEB_STATS));
    try {
        // Insert rows using standard HBase mechanism with standard HBase "types"
        Put put;
        List<Row> mutations = new ArrayList<Row>();
        put = new Put(Bytes.toBytes("entry1"));
        put.add(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
        put.add(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID2_BYTES), PInteger.INSTANCE.toBytes(ENTRY1_CLICK_COUNT));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID1_BYTES), PTime.INSTANCE.toBytes(ENTRY1_USER_ID1_LOGIN_TIME));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY1_USER_ID2_LOGIN_TIME));
        mutations.add(put);
        put = new Put(Bytes.toBytes("entry2"));
        put.add(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
        put.add(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID3_BYTES), PInteger.INSTANCE.toBytes(ENTRY2_CLICK_COUNT));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY2_USER_ID2_LOGIN_TIME));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID3_BYTES), PTime.INSTANCE.toBytes(ENTRY2_USER_ID3_LOGIN_TIME));
        mutations.add(put);
        put = new Put(Bytes.toBytes("entry3"));
        put.add(A_CF, QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
        put.add(A_CF, ByteUtil.concat(MAX_CLICK_COUNT_DYNCOL_PREFIX, USER_ID1_BYTES), PInteger.INSTANCE.toBytes(ENTRY3_CLICK_COUNT));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID1_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID1_LOGIN_TIME));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID2_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID2_LOGIN_TIME));
        put.add(B_CF, ByteUtil.concat(LAST_LOGIN_TIME_DYNCOL_PREFIX, USER_ID3_BYTES), PTime.INSTANCE.toBytes(ENTRY3_USER_ID3_LOGIN_TIME));
        mutations.add(put);
        hTable.batch(mutations);
    } finally {
        hTable.close();
    }
}
Also used: ArrayList (java.util.ArrayList), Row (org.apache.hadoop.hbase.client.Row), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices), Put (org.apache.hadoop.hbase.client.Put)
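
The qualifiers written above are raw byte concatenations, so they never appear in the Phoenix schema; they are surfaced at query time through Phoenix's dynamic-column syntax instead. A minimal sketch of such a read, with a hypothetical qualifier name and connection URL (the test's real qualifiers are prefix-plus-user-ID byte arrays):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class DynamicColumnRead {
    public static void main(String[] args) throws Exception {
        // Hypothetical connection URL; point it at your own quorum.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // A dynamic column is declared inline, in parentheses after the
            // table name, because it is not part of the static schema.
            // "LastLoginTimeUser2" is a hypothetical qualifier standing in for
            // the concatenated prefix + user-ID bytes the test writes.
            ResultSet rs = stmt.executeQuery(
                "SELECT B.\"LastLoginTimeUser2\"" +
                " FROM WEB_STATS(B.\"LastLoginTimeUser2\" TIME)");
            while (rs.next()) {
                System.out.println(rs.getTime(1));
            }
        }
    }
}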

Example 42 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

From class NativeHBaseTypesIT, method initTableValues:

@SuppressWarnings("deprecation")
private static void initTableValues() throws Exception {
    ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
    HTableInterface hTable = services.getTable(SchemaUtil.getTableNameAsBytes(HBASE_NATIVE_SCHEMA_NAME, HBASE_NATIVE));
    try {
        // Insert rows using standard HBase mechanism with standard HBase "types"
        List<Row> mutations = new ArrayList<Row>();
        byte[] family = Bytes.toBytes("1");
        byte[] uintCol = Bytes.toBytes("UINT_COL");
        byte[] ulongCol = Bytes.toBytes("ULONG_COL");
        byte[] key, bKey;
        Put put;
        key = ByteUtil.concat(Bytes.toBytes(10), Bytes.toBytes(100L), Bytes.toBytes("a"));
        put = new Put(key);
        put.add(family, uintCol, ts - 2, Bytes.toBytes(5));
        put.add(family, ulongCol, ts - 2, Bytes.toBytes(50L));
        mutations.add(put);
        put = new Put(key);
        put.add(family, uintCol, ts, Bytes.toBytes(10));
        put.add(family, ulongCol, ts, Bytes.toBytes(100L));
        mutations.add(put);
        bKey = key = ByteUtil.concat(Bytes.toBytes(20), Bytes.toBytes(200L), Bytes.toBytes("b"));
        put = new Put(key);
        put.add(family, uintCol, ts - 4, Bytes.toBytes(5000));
        put.add(family, ulongCol, ts - 4, Bytes.toBytes(50000L));
        mutations.add(put);
        // FIXME: the version of the Delete constructor without the lock args was introduced
        // in 0.94.4, thus if we try to use it here we can no longer use the 0.94.2 version
        // of the client.
        Delete del = new Delete(key, ts - 2);
        mutations.add(del);
        put = new Put(key);
        put.add(family, uintCol, ts, Bytes.toBytes(2000));
        put.add(family, ulongCol, ts, Bytes.toBytes(20000L));
        mutations.add(put);
        key = ByteUtil.concat(Bytes.toBytes(30), Bytes.toBytes(300L), Bytes.toBytes("c"));
        put = new Put(key);
        put.add(family, uintCol, ts, Bytes.toBytes(3000));
        put.add(family, ulongCol, ts, Bytes.toBytes(30000L));
        mutations.add(put);
        key = ByteUtil.concat(Bytes.toBytes(40), Bytes.toBytes(400L), Bytes.toBytes("d"));
        put = new Put(key);
        put.add(family, uintCol, ts, Bytes.toBytes(4000));
        put.add(family, ulongCol, ts, Bytes.toBytes(40000L));
        mutations.add(put);
        hTable.batch(mutations);
        Result r = hTable.get(new Get(bKey));
        assertFalse(r.isEmpty());
    } finally {
        hTable.close();
    }
    // Create Phoenix table after HBase table was created through the native APIs
    // The timestamp of the table creation must be later than the timestamp of the data
    ensureTableCreated(getUrl(), HBASE_NATIVE, HBASE_NATIVE, null, ts + 1, null);
}
Also used: Delete (org.apache.hadoop.hbase.client.Delete), Get (org.apache.hadoop.hbase.client.Get), ArrayList (java.util.ArrayList), Row (org.apache.hadoop.hbase.client.Row), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), ConnectionQueryServices (org.apache.phoenix.query.ConnectionQueryServices), Put (org.apache.hadoop.hbase.client.Put), Result (org.apache.hadoop.hbase.client.Result)
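
The composite row keys above concatenate a raw int, long, and string written with Bytes.toBytes, which is exactly the encoding Phoenix's UNSIGNED types decode. A minimal sketch of DDL that could map such a pre-existing table (assumed names and URL; the test's real schema is created by ensureTableCreated):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class MapNativeHBaseTable {
    public static void main(String[] args) throws Exception {
        // UNSIGNED_INT/UNSIGNED_LONG decode the big-endian bytes produced by
        // Bytes.toBytes(int) / Bytes.toBytes(long) without Phoenix's sign
        // flip, and "1" matches the column family used by the Puts above.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            stmt.execute(
                "CREATE TABLE HBASE_NATIVE (" +
                " UINT_KEY UNSIGNED_INT NOT NULL," +
                " ULONG_KEY UNSIGNED_LONG NOT NULL," +
                " STRING_KEY VARCHAR NOT NULL," +
                " \"1\".UINT_COL UNSIGNED_INT," +
                " \"1\".ULONG_COL UNSIGNED_LONG," +
                " CONSTRAINT pk PRIMARY KEY (UINT_KEY, ULONG_KEY, STRING_KEY))");
        }
    }
}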

Example 43 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

From class ParameterizedTransactionIT, method testNonTxToTxTableFailure:

@Ignore
@Test
public void testNonTxToTxTableFailure() throws Exception {
    String nonTxTableName = generateUniqueName();
    Connection conn = DriverManager.getConnection(getUrl());
    // Put table in SYSTEM schema to prevent attempts to update the cache after we disable SYSTEM.CATALOG
    conn.createStatement().execute("CREATE TABLE \"SYSTEM\"." + nonTxTableName + "(k INTEGER PRIMARY KEY, v VARCHAR)" + tableDDLOptions);
    conn.createStatement().execute("UPSERT INTO \"SYSTEM\"." + nonTxTableName + " VALUES (1)");
    conn.commit();
    // Reset the empty column value to a zero-length value, as it was before transactions were enabled
    HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("SYSTEM." + nonTxTableName));
    Put put = new Put(PInteger.INSTANCE.toBytes(1));
    put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
    htable.put(put);
    HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
    admin.disableTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
    try {
        // This will succeed initially in updating the HBase metadata, but then will fail when
        // the SYSTEM.CATALOG table is attempted to be updated, exercising the code to restore
        // the coprocessors back to the non transactional ones.
        conn.createStatement().execute("ALTER TABLE \"SYSTEM\"." + nonTxTableName + " SET TRANSACTIONAL=true");
        fail();
    } catch (SQLException e) {
        assertTrue(e.getMessage().contains(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " is disabled"));
    } finally {
        admin.enableTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
        admin.close();
    }
    ResultSet rs = conn.createStatement().executeQuery("SELECT k FROM \"SYSTEM\"." + nonTxTableName + " WHERE v IS NULL");
    assertTrue(rs.next());
    assertEquals(1, rs.getInt(1));
    assertFalse(rs.next());
    htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("SYSTEM." + nonTxTableName));
    assertFalse(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
    assertEquals(1, conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(Bytes.toBytes("SYSTEM." + nonTxTableName)).getFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES).getMaxVersions());
}
Also used: HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin), PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), SQLException (java.sql.SQLException), Connection (java.sql.Connection), ResultSet (java.sql.ResultSet), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), Put (org.apache.hadoop.hbase.client.Put), Ignore (org.junit.Ignore), Test (org.junit.Test)
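
HTableInterface and the Put.add overloads used here were deprecated in HBase 1.0 and removed in 2.0 in favor of Table and Put.addColumn. A minimal sketch of the same empty-column reset through the replacement API, assuming a plain hbase-site Configuration rather than Phoenix's ConnectionQueryServices:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.types.PInteger;
import org.apache.phoenix.util.ByteUtil;

public class EmptyColumnReset {
    // Same reset as the test performs for row key 1, but through the
    // non-deprecated Table API (fullTableName is supplied by the caller).
    static void resetEmptyColumn(String fullTableName) throws Exception {
        Configuration config = HBaseConfiguration.create();
        try (org.apache.hadoop.hbase.client.Connection hconn =
                 ConnectionFactory.createConnection(config);
             Table table = hconn.getTable(TableName.valueOf(fullTableName))) {
            Put put = new Put(PInteger.INSTANCE.toBytes(1));
            // addColumn replaces the deprecated Put.add(family, qualifier, value).
            put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES,
                    QueryConstants.EMPTY_COLUMN_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
            table.put(put);
        }
    }
}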

Example 44 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

From class FlappingTransactionIT, method testExternalTxContext:

@Test
public void testExternalTxContext() throws Exception {
    ResultSet rs;
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(false);
    String fullTableName = generateUniqueName();
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    TransactionSystemClient txServiceClient = pconn.getQueryServices().getTransactionSystemClient();
    Statement stmt = conn.createStatement();
    stmt.execute("CREATE TABLE " + fullTableName + "(K VARCHAR PRIMARY KEY, V1 VARCHAR, V2 VARCHAR) TRANSACTIONAL=true");
    HTableInterface htable = pconn.getQueryServices().getTable(Bytes.toBytes(fullTableName));
    stmt.executeUpdate("upsert into " + fullTableName + " values('x', 'a', 'a')");
    conn.commit();
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
    }
    // Use HBase level Tephra APIs to start a new transaction
    TransactionAwareHTable txAware = new TransactionAwareHTable(htable, TxConstants.ConflictDetection.ROW);
    TransactionContext txContext = new TransactionContext(txServiceClient, txAware);
    txContext.start();
    // Use HBase APIs to add a new row
    Put put = new Put(Bytes.toBytes("z"));
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("b"));
    txAware.put(put);
    // Use Phoenix APIs to add new row (sharing the transaction context)
    pconn.setTransactionContext(txContext);
    conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('y', 'c', 'c')");
    // New connection should not see data as it hasn't been committed yet
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
    }
    // Use new connection to create a row with a conflict
    Connection connWithConflict = DriverManager.getConnection(getUrl(), props);
    connWithConflict.createStatement().execute("upsert into " + fullTableName + " values('z', 'd', 'd')");
    // Existing connection should see data even though it hasn't been committed yet
    rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(3, rs.getInt(1));
    // Use Tephra APIs directly to finish (i.e. commit) the transaction
    txContext.finish();
    // Confirm that attempt to commit row with conflict fails
    try {
        connWithConflict.commit();
        fail();
    } catch (SQLException e) {
        assertEquals(SQLExceptionCode.TRANSACTION_CONFLICT_EXCEPTION.getErrorCode(), e.getErrorCode());
    } finally {
        connWithConflict.close();
    }
    // New connection should now see data as it has been committed
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(3, rs.getInt(1));
    }
    // Repeat the same as above, but this time abort the transaction
    txContext = new TransactionContext(txServiceClient, txAware);
    txContext.start();
    // Use HBase APIs to add a new row
    put = new Put(Bytes.toBytes("j"));
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
    put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, Bytes.toBytes("V1"), Bytes.toBytes("e"));
    txAware.put(put);
    // Use Phoenix APIs to add new row (sharing the transaction context)
    pconn.setTransactionContext(txContext);
    conn.createStatement().executeUpdate("upsert into " + fullTableName + " values('k', 'f', 'f')");
    // Existing connection should see data even though it hasn't been committed yet
    rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(5, rs.getInt(1));
    connWithConflict.createStatement().execute("upsert into " + fullTableName + " values('k', 'g', 'g')");
    rs = connWithConflict.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(4, rs.getInt(1));
    // Use Tephra APIs directly to abort (i.e. rollback) the transaction
    txContext.abort();
    rs = conn.createStatement().executeQuery("select count(*) from " + fullTableName);
    assertTrue(rs.next());
    assertEquals(3, rs.getInt(1));
    // Should succeed since conflicting row was aborted
    connWithConflict.commit();
    // New connection should now see data as it has been committed
    try (Connection newConn = DriverManager.getConnection(getUrl(), props)) {
        rs = newConn.createStatement().executeQuery("select count(*) from " + fullTableName);
        assertTrue(rs.next());
        assertEquals(4, rs.getInt(1));
    }
    // Even using HBase APIs directly, we shouldn't find 'j' since a delete marker would have been
    // written to hide it.
    Result result = htable.get(new Get(Bytes.toBytes("j")));
    assertTrue(result.isEmpty());
}
Also used: TransactionAwareHTable (org.apache.tephra.hbase.TransactionAwareHTable), PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), SQLException (java.sql.SQLException), PreparedStatement (java.sql.PreparedStatement), Statement (java.sql.Statement), Connection (java.sql.Connection), Properties (java.util.Properties), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), Put (org.apache.hadoop.hbase.client.Put), Result (org.apache.hadoop.hbase.client.Result), TransactionSystemClient (org.apache.tephra.TransactionSystemClient), TransactionContext (org.apache.tephra.TransactionContext), Get (org.apache.hadoop.hbase.client.Get), ResultSet (java.sql.ResultSet), Test (org.junit.Test)
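
Stripped of the assertions, the Tephra flow above condenses to a small lifecycle. A sketch assuming htable and txServiceClient are obtained from a Phoenix connection exactly as in the test (the row key, family, and qualifier are illustrative):

import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.tephra.TransactionContext;
import org.apache.tephra.TransactionFailureException;
import org.apache.tephra.TransactionSystemClient;
import org.apache.tephra.TxConstants;
import org.apache.tephra.hbase.TransactionAwareHTable;

public class TephraLifecycle {
    static void writeTransactionally(HTableInterface htable,
            TransactionSystemClient txServiceClient) throws Exception {
        // Wrap the table so its mutations are tagged with the transaction,
        // detecting conflicts at row granularity as the test does.
        TransactionAwareHTable txAware =
            new TransactionAwareHTable(htable, TxConstants.ConflictDetection.ROW);
        TransactionContext txContext =
            new TransactionContext(txServiceClient, txAware);
        txContext.start();                 // obtain a transaction from the service
        try {
            Put put = new Put(Bytes.toBytes("row"));
            put.addColumn(Bytes.toBytes("0"), Bytes.toBytes("V1"), Bytes.toBytes("v"));
            txAware.put(put);
            txContext.finish();            // conflict check, then commit
        } catch (TransactionFailureException e) {
            txContext.abort();             // roll back the uncommitted writes
            throw e;
        }
    }
}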

Example 45 with HTableInterface

Use of org.apache.hadoop.hbase.client.HTableInterface in project phoenix by apache.

From class ParameterizedTransactionIT, method testNonTxToTxTable:

@Test
public void testNonTxToTxTable() throws Exception {
    String nonTxTableName = generateUniqueName();
    Connection conn = DriverManager.getConnection(getUrl());
    conn.createStatement().execute("CREATE TABLE " + nonTxTableName + "(k INTEGER PRIMARY KEY, v VARCHAR)" + tableDDLOptions);
    conn.createStatement().execute("UPSERT INTO " + nonTxTableName + " VALUES (1)");
    conn.createStatement().execute("UPSERT INTO " + nonTxTableName + " VALUES (2, 'a')");
    conn.createStatement().execute("UPSERT INTO " + nonTxTableName + " VALUES (3, 'b')");
    conn.commit();
    String index = generateUniqueName();
    conn.createStatement().execute("CREATE INDEX " + index + " ON " + nonTxTableName + "(v)");
    // Reset the empty column value to a zero-length value, as it was before transactions were enabled
    HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(nonTxTableName));
    List<Put> puts = Lists.newArrayList(new Put(PInteger.INSTANCE.toBytes(1)), new Put(PInteger.INSTANCE.toBytes(2)), new Put(PInteger.INSTANCE.toBytes(3)));
    for (Put put : puts) {
        put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
    }
    htable.put(puts);
    conn.createStatement().execute("ALTER TABLE " + nonTxTableName + " SET TRANSACTIONAL=true");
    htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(nonTxTableName));
    assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
    htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(index));
    assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
    conn.createStatement().execute("UPSERT INTO " + nonTxTableName + " VALUES (4, 'c')");
    ResultSet rs = conn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ k FROM " + nonTxTableName + " WHERE v IS NULL");
    assertTrue(conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, nonTxTableName)).isTransactional());
    assertTrue(rs.next());
    assertEquals(1, rs.getInt(1));
    assertFalse(rs.next());
    conn.commit();
    conn.createStatement().execute("UPSERT INTO " + nonTxTableName + " VALUES (5, 'd')");
    rs = conn.createStatement().executeQuery("SELECT k FROM " + nonTxTableName);
    assertTrue(conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, index)).isTransactional());
    assertTrue(rs.next());
    assertEquals(1, rs.getInt(1));
    assertTrue(rs.next());
    assertEquals(2, rs.getInt(1));
    assertTrue(rs.next());
    assertEquals(3, rs.getInt(1));
    assertTrue(rs.next());
    assertEquals(4, rs.getInt(1));
    assertTrue(rs.next());
    assertEquals(5, rs.getInt(1));
    assertFalse(rs.next());
    conn.rollback();
    rs = conn.createStatement().executeQuery("SELECT k FROM " + nonTxTableName);
    assertTrue(rs.next());
    assertEquals(1, rs.getInt(1));
    assertTrue(rs.next());
    assertEquals(2, rs.getInt(1));
    assertTrue(rs.next());
    assertEquals(3, rs.getInt(1));
    assertTrue(rs.next());
    assertEquals(4, rs.getInt(1));
    assertFalse(rs.next());
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), Connection (java.sql.Connection), ResultSet (java.sql.ResultSet), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), PTableKey (org.apache.phoenix.schema.PTableKey), Put (org.apache.hadoop.hbase.client.Put), Test (org.junit.Test)
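
This and the previous example repeat the same descriptor lookup to assert coprocessor state. A hypothetical helper that captures the pattern, using the same HBaseAdmin API the examples already rely on:

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public class CoprocessorCheck {
    // True when the table's descriptor lists the given coprocessor class,
    // e.g. PhoenixTransactionalProcessor after ALTER TABLE ... SET TRANSACTIONAL=true.
    static boolean hasCoprocessor(HBaseAdmin admin, byte[] tableName,
            Class<?> coprocClass) throws Exception {
        HTableDescriptor desc = admin.getTableDescriptor(tableName);
        return desc.getCoprocessors().contains(coprocClass.getName());
    }
}

With it, each assertion above reduces to a call like hasCoprocessor(admin, Bytes.toBytes(index), PhoenixTransactionalProcessor.class).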

Aggregations

HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 122
Result (org.apache.hadoop.hbase.client.Result): 43
Put (org.apache.hadoop.hbase.client.Put): 42
IOException (java.io.IOException): 38
ArrayList (java.util.ArrayList): 27
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 23
Get (org.apache.hadoop.hbase.client.Get): 21
Scan (org.apache.hadoop.hbase.client.Scan): 21
Test (org.junit.Test): 20
SQLException (java.sql.SQLException): 19
HashMap (java.util.HashMap): 17
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 17
Connection (java.sql.Connection): 15
Delete (org.apache.hadoop.hbase.client.Delete): 12
HBaseAdmin (org.apache.hadoop.hbase.client.HBaseAdmin): 12
Mutation (org.apache.hadoop.hbase.client.Mutation): 12
PhoenixIOException (org.apache.phoenix.exception.PhoenixIOException): 11
ResultSet (java.sql.ResultSet): 10
Map (java.util.Map): 9
Configuration (org.apache.hadoop.conf.Configuration): 9