Search in sources :

Example 96 with Put

use of org.apache.hadoop.hbase.client.Put in project phoenix by apache.

From the class TestNonTxIndexBuilder, the method testRebuildMultipleVersionRow:

/**
 * Tests a partial rebuild of a row with multiple versions. The current row state carries
 * several versions of the indexed cell, and the rebuild replays mutations starting from
 * time t=2.
 *
 * There should be one index row version per data row version.
 */
@Test
public void testRebuildMultipleVersionRow() throws IOException {
    // A rebuild replays historical mutations, so mutations newer than the one being
    // replayed must be ignored — see LocalTable#getCurrentRowState().
    Mockito.when(mockIndexMetaData.ignoreNewerMutations()).thenReturn(true);
    // Seed the current row state with every version of the indexed cell; when rebuilding
    // as of t=2 the scanner in LocalTable should only return the first version.
    Cell currentCell1 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 1, PUT_TYPE, VALUE_1);
    Cell currentCell2 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 2, PUT_TYPE, VALUE_2);
    Cell currentCell3 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 3, PUT_TYPE, VALUE_3);
    Cell currentCell4 = CellUtil.createCell(ROW, FAM, INDEXED_QUALIFIER, 4, PUT_TYPE, VALUE_4);
    setCurrentRowState(Arrays.asList(currentCell4, currentCell3, currentCell2, currentCell1));
    // The rebuilder replays mutations starting from t=2, newest first.
    MultiMutation mutation = new MultiMutation(new ImmutableBytesPtr(ROW));
    long[] replayTimestamps = { 4, 3, 2 };
    byte[][] replayValues = { VALUE_4, VALUE_3, VALUE_2 };
    for (int i = 0; i < replayTimestamps.length; i++) {
        Put put = new Put(ROW);
        put.addImmutable(FAM, INDEXED_QUALIFIER, replayTimestamps[i], replayValues[i]);
        mutation.addAll(put);
    }
    Collection<Pair<Mutation, byte[]>> indexUpdates = indexBuilder.getIndexUpdate(mutation, mockIndexMetaData);
    // 3 puts and 3 deletes (one to hide the existing index row for VALUE_1, and two to
    // hide the index rows for VALUE_2 and VALUE_3)
    assertEquals(6, indexUpdates.size());
    for (int ts = 2; ts <= 4; ts++) {
        // the delete hiding the previous index row version (qualifier is not needed)
        assertContains(indexUpdates, ts, ROW, KeyValue.Type.DeleteFamily, FAM, new byte[0], ts);
        // the put for the index row version introduced at this timestamp
        assertContains(indexUpdates, ColumnTracker.NO_NEWER_PRIMARY_TABLE_ENTRY_TIMESTAMP, ROW, KeyValue.Type.Put, FAM, QueryConstants.EMPTY_COLUMN_BYTES, ts);
    }
}
Also used : MultiMutation(org.apache.phoenix.hbase.index.MultiMutation) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Cell(org.apache.hadoop.hbase.Cell) Put(org.apache.hadoop.hbase.client.Put) Pair(org.apache.hadoop.hbase.util.Pair) Test(org.junit.Test) BaseConnectionlessQueryTest(org.apache.phoenix.query.BaseConnectionlessQueryTest)

Example 97 with Put

use of org.apache.hadoop.hbase.client.Put in project phoenix by apache.

From the class TestLocalTableState, the method testScannerForMutableRows:

@Test(expected = ScannerCreatedException.class)
public void testScannerForMutableRows() throws Exception {
    // Metadata describing mutable rows outside of a rebuild: reading the current row
    // state for such rows requires opening a region scanner.
    IndexMetaData indexMetaData = new IndexMetaData() {

        @Override
        public boolean isImmutableRows() {
            return false;
        }

        @Override
        public boolean ignoreNewerMutations() {
            return false;
        }
    };
    Put update = new Put(row);
    update.add(fam, qual, ts, val);
    // Mock a region whose scanner creation always throws; since mutable rows force a
    // scanner open, the test expects ScannerCreatedException to propagate.
    // NOTE(review): the exception message below reads as if scanners must NOT be opened —
    // it appears copied from the immutable-row test — but nothing asserts on its text.
    Configuration conf = new Configuration(false);
    RegionCoprocessorEnvironment env = Mockito.mock(RegionCoprocessorEnvironment.class);
    Region region = Mockito.mock(Region.class);
    Mockito.when(env.getConfiguration()).thenReturn(conf);
    Mockito.when(env.getRegion()).thenReturn(region);
    Mockito.when(region.getScanner(Mockito.any(Scan.class)))
            .thenThrow(new ScannerCreatedException("Should not open scanner when data is immutable"));
    LocalHBaseState state = new LocalTable(env);
    LocalTableState table = new LocalTableState(env, state, update);
    // stage the kvs from the mutation as pending updates
    table.addPendingUpdates(KeyValueUtil.ensureKeyValues(update.get(fam, qual)));
    // requesting indexed-column state for mutable rows triggers the scanner open
    ColumnReference col = new ColumnReference(fam, qual);
    table.setCurrentTimestamp(ts);
    table.getIndexedColumnsTableState(Arrays.asList(col), false, false, indexMetaData);
}
Also used : LocalTable(org.apache.phoenix.hbase.index.covered.data.LocalTable) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) LocalHBaseState(org.apache.phoenix.hbase.index.covered.data.LocalHBaseState) Configuration(org.apache.hadoop.conf.Configuration) Region(org.apache.hadoop.hbase.regionserver.Region) Scan(org.apache.hadoop.hbase.client.Scan) Put(org.apache.hadoop.hbase.client.Put) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference) Test(org.junit.Test)

Example 98 with Put

use of org.apache.hadoop.hbase.client.Put in project phoenix by apache.

From the class ParameterizedTransactionIT, the method testNonTxToTxTableFailure:

@Ignore
@Test
public void testNonTxToTxTableFailure() throws Exception {
    String nonTxTableName = generateUniqueName();
    // Creating the table in the SYSTEM schema prevents cache-update attempts once
    // SYSTEM.CATALOG is disabled below.
    String quotedName = "\"SYSTEM\"." + nonTxTableName;
    byte[] physicalName = Bytes.toBytes("SYSTEM." + nonTxTableName);
    Connection conn = DriverManager.getConnection(getUrl());
    conn.createStatement().execute("CREATE TABLE " + quotedName + "(k INTEGER PRIMARY KEY, v VARCHAR)" + tableDDLOptions);
    conn.createStatement().execute("UPSERT INTO " + quotedName + " VALUES (1)");
    conn.commit();
    // Reset the empty column value to an empty value, as it was pre-transactions.
    HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(physicalName);
    Put emptyColumnPut = new Put(PInteger.INSTANCE.toBytes(1));
    emptyColumnPut.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
    htable.put(emptyColumnPut);
    HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin();
    admin.disableTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
    try {
        // This will succeed initially in updating the HBase metadata, but then will fail when
        // the SYSTEM.CATALOG table is attempted to be updated, exercising the code to restore
        // the coprocessors back to the non transactional ones.
        conn.createStatement().execute("ALTER TABLE " + quotedName + " SET TRANSACTIONAL=true");
        fail();
    } catch (SQLException e) {
        assertTrue(e.getMessage().contains(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + " is disabled"));
    } finally {
        admin.enableTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
        admin.close();
    }
    // The row is still visible and the table remains non-transactional.
    ResultSet rs = conn.createStatement().executeQuery("SELECT k FROM " + quotedName + " WHERE v IS NULL");
    assertTrue(rs.next());
    assertEquals(1, rs.getInt(1));
    assertFalse(rs.next());
    htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(physicalName);
    assertFalse(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
    assertEquals(1, conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(physicalName).getFamily(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES).getMaxVersions());
}
Also used : HBaseAdmin(org.apache.hadoop.hbase.client.HBaseAdmin) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) SQLException(java.sql.SQLException) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ResultSet(java.sql.ResultSet) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Put(org.apache.hadoop.hbase.client.Put) Ignore(org.junit.Ignore) Test(org.junit.Test)

Example 99 with Put

use of org.apache.hadoop.hbase.client.Put in project phoenix by apache.

From the class ParameterizedTransactionIT, the method testNonTxToTxTable:

@Test
public void testNonTxToTxTable() throws Exception {
    String nonTxTableName = generateUniqueName();
    Connection conn = DriverManager.getConnection(getUrl());
    conn.createStatement().execute("CREATE TABLE " + nonTxTableName + "(k INTEGER PRIMARY KEY, v VARCHAR)" + tableDDLOptions);
    conn.createStatement().execute("UPSERT INTO " + nonTxTableName + " VALUES (1)");
    conn.createStatement().execute("UPSERT INTO " + nonTxTableName + " VALUES (2, 'a')");
    conn.createStatement().execute("UPSERT INTO " + nonTxTableName + " VALUES (3, 'b')");
    conn.commit();
    String index = generateUniqueName();
    conn.createStatement().execute("CREATE INDEX " + index + " ON " + nonTxTableName + "(v)");
    // Reset the empty column value to an empty value, as it was pre-transactions.
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    HTableInterface htable = pconn.getQueryServices().getTable(Bytes.toBytes(nonTxTableName));
    List<Put> puts = Lists.newArrayList(new Put(PInteger.INSTANCE.toBytes(1)), new Put(PInteger.INSTANCE.toBytes(2)), new Put(PInteger.INSTANCE.toBytes(3)));
    for (Put put : puts) {
        put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, ByteUtil.EMPTY_BYTE_ARRAY);
    }
    htable.put(puts);
    // Switch the table to transactional; both the table and its index should pick up
    // the transactional coprocessor.
    conn.createStatement().execute("ALTER TABLE " + nonTxTableName + " SET TRANSACTIONAL=true");
    htable = pconn.getQueryServices().getTable(Bytes.toBytes(nonTxTableName));
    assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
    htable = pconn.getQueryServices().getTable(Bytes.toBytes(index));
    assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
    // An uncommitted upsert is visible to this connection but rolled back later.
    conn.createStatement().execute("UPSERT INTO " + nonTxTableName + " VALUES (4, 'c')");
    ResultSet rs = conn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ k FROM " + nonTxTableName + " WHERE v IS NULL");
    assertTrue(pconn.getTable(new PTableKey(null, nonTxTableName)).isTransactional());
    assertTrue(rs.next());
    assertEquals(1, rs.getInt(1));
    assertFalse(rs.next());
    conn.commit();
    conn.createStatement().execute("UPSERT INTO " + nonTxTableName + " VALUES (5, 'd')");
    rs = conn.createStatement().executeQuery("SELECT k FROM " + nonTxTableName);
    assertTrue(pconn.getTable(new PTableKey(null, index)).isTransactional());
    // All five rows are visible before the rollback (row 5 is still uncommitted).
    for (int expected = 1; expected <= 5; expected++) {
        assertTrue(rs.next());
        assertEquals(expected, rs.getInt(1));
    }
    assertFalse(rs.next());
    conn.rollback();
    // After the rollback, the uncommitted row 5 is gone.
    rs = conn.createStatement().executeQuery("SELECT k FROM " + nonTxTableName);
    for (int expected = 1; expected <= 4; expected++) {
        assertTrue(rs.next());
        assertEquals(expected, rs.getInt(1));
    }
    assertFalse(rs.next());
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ResultSet(java.sql.ResultSet) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) PTableKey(org.apache.phoenix.schema.PTableKey) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)

Example 100 with Put

use of org.apache.hadoop.hbase.client.Put in project phoenix by apache.

From the class PhoenixIndexBuilder, the method convertIncrementToPutInSingletonList:

/**
 * Wraps an {@code Increment} as a {@code Put} carrying the same cells and attributes,
 * returned as a one-element mutation list.
 */
private static List<Mutation> convertIncrementToPutInSingletonList(Increment inc) {
    Put put = new Put(inc.getRow());
    // copy cell data first, then the mutation attributes
    transferCells(inc, put);
    transferAttributes(inc, put);
    return Collections.<Mutation>singletonList(put);
}
Also used : Mutation(org.apache.hadoop.hbase.client.Mutation) Put(org.apache.hadoop.hbase.client.Put)

Aggregations

Put (org.apache.hadoop.hbase.client.Put)1416 Test (org.junit.Test)672 Table (org.apache.hadoop.hbase.client.Table)489 ArrayList (java.util.ArrayList)317 Result (org.apache.hadoop.hbase.client.Result)279 TableName (org.apache.hadoop.hbase.TableName)257 IOException (java.io.IOException)241 Delete (org.apache.hadoop.hbase.client.Delete)225 Scan (org.apache.hadoop.hbase.client.Scan)222 Cell (org.apache.hadoop.hbase.Cell)200 Get (org.apache.hadoop.hbase.client.Get)196 Configuration (org.apache.hadoop.conf.Configuration)148 TableDescriptor (org.apache.hadoop.hbase.client.TableDescriptor)139 Connection (org.apache.hadoop.hbase.client.Connection)122 KeyValue (org.apache.hadoop.hbase.KeyValue)112 ResultScanner (org.apache.hadoop.hbase.client.ResultScanner)110 Admin (org.apache.hadoop.hbase.client.Admin)89 List (java.util.List)83 Mutation (org.apache.hadoop.hbase.client.Mutation)82 RegionInfo (org.apache.hadoop.hbase.client.RegionInfo)80