
Example 51 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

The class StatisticsWriter, method commitStats.

public void commitStats(List<Mutation> mutations, StatisticsCollector statsCollector) throws IOException {
    commitLastStatsUpdatedTime(statsCollector);
    if (mutations.size() > 0) {
        byte[] row = mutations.get(0).getRow();
        MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder();
        for (Mutation m : mutations) {
            mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(getMutationType(m), m));
        }
        MutateRowsRequest mrm = mrmBuilder.build();
        CoprocessorRpcChannel channel = statsWriterTable.coprocessorService(row);
        MultiRowMutationService.BlockingInterface service = MultiRowMutationService.newBlockingStub(channel);
        try {
            service.mutateRows(null, mrm);
        } catch (ServiceException ex) {
            // propagate the remote failure instead of silently discarding the converted exception
            throw ProtobufUtil.toIOException(ex);
        }
    }
}
Also used : MutateRowsRequest(org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest) ServiceException(com.google.protobuf.ServiceException) CoprocessorRpcChannel(org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel) MultiRowMutationService(org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService) Mutation(org.apache.hadoop.hbase.client.Mutation)
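
The snippet calls a getMutationType helper that isn't shown above. A minimal sketch of what such a helper could look like, assuming it only needs to distinguish Put from Delete (MutationProto here is org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto; the actual Phoenix implementation may differ):

// Hypothetical reconstruction of the getMutationType helper used above: map a
// client-side Mutation onto the protobuf MutationType that ProtobufUtil.toMutation
// expects. Assumed to handle only Put and Delete.
private static MutationProto.MutationType getMutationType(Mutation m) throws IOException {
    if (m instanceof Put) {
        return MutationProto.MutationType.PUT;
    }
    if (m instanceof Delete) {
        return MutationProto.MutationType.DELETE;
    }
    throw new IOException("Unsupported mutation type: " + m.getClass().getName());
}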

Example 52 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

The class TestIndexWriter, method testFailureOnRunningUpdateAbortsPending.

/**
   * Index updates can potentially be queued up if there aren't enough writer threads. If a running
   * index write fails, then we should early-exit the pending index update when it comes up (if the
   * pool isn't already shut down).
   * <p>
   * This test is a little racy - we could actually see the first task fail before the third task
   * is even submitted. However, we should never see the third task attempt the batch write, so we
   * should never see a failure here.
   * @throws Exception on failure
   */
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testFailureOnRunningUpdateAbortsPending() throws Exception {
    Abortable abort = new StubAbortable();
    Stoppable stop = Mockito.mock(Stoppable.class);
    // thread pool for the writer tasks (three threads, one per index update)
    ExecutorService exec = Executors.newFixedThreadPool(3);
    Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
    FakeTableFactory factory = new FakeTableFactory(tables);
    // updates to two different tables
    byte[] tableName = Bytes.add(this.testName.getTableName(), new byte[] { 1, 2, 3, 4 });
    Put m = new Put(row);
    m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
    // this is a prefix of tableName, so it sorts before it
    byte[] tableName2 = this.testName.getTableName();
    List<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>();
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName));
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName2));
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName2));
    // first table will fail
    HTableInterface table = Mockito.mock(HTableInterface.class);
    Mockito.when(table.batch(Mockito.anyList())).thenThrow(new IOException("Intentional IOException for failed first write."));
    Mockito.when(table.getTableName()).thenReturn(tableName);
    RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration();
    Mockito.when(e.getConfiguration()).thenReturn(conf);
    Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
    // second table just blocks to make sure that the abort propagates to the third task
    final CountDownLatch waitOnAbortedLatch = new CountDownLatch(1);
    final boolean[] failed = new boolean[] { false };
    HTableInterface table2 = Mockito.mock(HTableInterface.class);
    Mockito.when(table2.getTableName()).thenReturn(tableName2);
    Mockito.when(table2.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            waitOnAbortedLatch.await();
            return null;
        }
    }).thenAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            failed[0] = true;
            throw new RuntimeException("Unexpected exception - second index table shouldn't have been written to");
        }
    });
    // add the tables to the set of tables, so they're returned to the writer
    tables.put(new ImmutableBytesPtr(tableName), table);
    tables.put(new ImmutableBytesPtr(tableName2), table2);
    ParallelWriterIndexCommitter committer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
    committer.setup(factory, exec, abort, stop, 2, e);
    KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy();
    policy.setup(stop, abort);
    IndexWriter writer = new IndexWriter(committer, policy);
    try {
        writer.write(indexUpdates);
        fail("Should not have successfully completed all index writes");
    } catch (SingleIndexWriteFailureException s) {
        LOG.info("Correctly got a failure to reach the index", s);
        // the abort propagated correctly, so release the blocked task and let the next one run
        waitOnAbortedLatch.countDown();
    }
    assertFalse("Third set of index writes never have been attempted - should have seen the abort before done!", failed[0]);
    writer.stop(this.testName.getTableNameString() + " finished");
    assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
    assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
}
Also used : StubAbortable(org.apache.phoenix.hbase.index.StubAbortable) Configuration(org.apache.hadoop.conf.Configuration) HashMap(java.util.HashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ArrayList(java.util.ArrayList) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) Abortable(org.apache.hadoop.hbase.Abortable) StubAbortable(org.apache.phoenix.hbase.index.StubAbortable) Pair(org.apache.hadoop.hbase.util.Pair) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Stoppable(org.apache.hadoop.hbase.Stoppable) IOException(java.io.IOException) CountDownLatch(java.util.concurrent.CountDownLatch) Put(org.apache.hadoop.hbase.client.Put) Answer(org.mockito.stubbing.Answer) InvocationOnMock(org.mockito.invocation.InvocationOnMock) SingleIndexWriteFailureException(org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException) ExecutorService(java.util.concurrent.ExecutorService) Mutation(org.apache.hadoop.hbase.client.Mutation) Test(org.junit.Test)
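
The ordering guarantee the test relies on can be isolated into a small, self-contained sketch (all class and variable names here are mine, not Phoenix's): a blocked task waits on a latch that is counted down only after the first task's failure has been observed, so the latch's release proves the failure handling ran first.

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class AbortOrderingSketch {
    public static void main(String[] args) throws Exception {
        final CountDownLatch failureHandled = new CountDownLatch(1);
        ExecutorService exec = Executors.newFixedThreadPool(2);
        Callable<Void> failingWrite = new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                throw new IOException("intentional failure for the first write");
            }
        };
        Callable<Void> blockedWrite = new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                failureHandled.await(); // released only after the failure is handled
                return null;
            }
        };
        Future<Void> first = exec.submit(failingWrite);
        Future<Void> second = exec.submit(blockedWrite);
        try {
            first.get(); // observe the first write's failure
        } catch (ExecutionException e) {
            failureHandled.countDown(); // failure seen; now release the blocked task
        }
        second.get();
        exec.shutdown();
    }
}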

Example 53 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

The class TestParalleIndexWriter, method testSynchronouslyCompletesAllWrites.

@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testSynchronouslyCompletesAllWrites() throws Exception {
    LOG.info("Starting " + test.getTableNameString());
    LOG.info("Current thread is interrupted: " + Thread.interrupted());
    Abortable abort = new StubAbortable();
    Stoppable stop = Mockito.mock(Stoppable.class);
    ExecutorService exec = Executors.newFixedThreadPool(1);
    Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
    FakeTableFactory factory = new FakeTableFactory(tables);
    RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration();
    Mockito.when(e.getConfiguration()).thenReturn(conf);
    Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
    ImmutableBytesPtr tableName = new ImmutableBytesPtr(this.test.getTableName());
    Put m = new Put(row);
    m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
    Multimap<HTableInterfaceReference, Mutation> indexUpdates = ArrayListMultimap.<HTableInterfaceReference, Mutation>create();
    indexUpdates.put(new HTableInterfaceReference(tableName), m);
    HTableInterface table = Mockito.mock(HTableInterface.class);
    final boolean[] completed = new boolean[] { false };
    Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            // just keep track that it was called
            completed[0] = true;
            return null;
        }
    });
    Mockito.when(table.getTableName()).thenReturn(test.getTableName());
    // add the table to the set of tables, so it's returned to the writer
    tables.put(tableName, table);
    // setup the writer and failure policy
    ParallelWriterIndexCommitter writer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
    writer.setup(factory, exec, abort, stop, 1, e);
    writer.write(indexUpdates, true);
    assertTrue("Writer returned before the table batch completed! Likely a race condition tripped", completed[0]);
    writer.stop(this.test.getTableNameString() + " finished");
    assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
    assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
}
Also used : StubAbortable(org.apache.phoenix.hbase.index.StubAbortable) Configuration(org.apache.hadoop.conf.Configuration) HashMap(java.util.HashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) Abortable(org.apache.hadoop.hbase.Abortable) StubAbortable(org.apache.phoenix.hbase.index.StubAbortable) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Stoppable(org.apache.hadoop.hbase.Stoppable) Put(org.apache.hadoop.hbase.client.Put) InvocationOnMock(org.mockito.invocation.InvocationOnMock) HTableInterfaceReference(org.apache.phoenix.hbase.index.table.HTableInterfaceReference) ExecutorService(java.util.concurrent.ExecutorService) Mutation(org.apache.hadoop.hbase.client.Mutation) Test(org.junit.Test)
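
The Multimap above groups index updates by target table, which is what lets a committer write one batch per table. A rough sketch of that per-table fan-out, assuming the grouped asMap() view is iterated (illustrative only, with standard java.util and java.util.concurrent imports assumed; resolveTable is a hypothetical stand-in for however the writer obtains its HTableInterface, not actual ParallelWriterIndexCommitter code):

// Illustrative fan-out over the grouped updates; one batch per target table.
for (Map.Entry<HTableInterfaceReference, Collection<Mutation>> entry
        : indexUpdates.asMap().entrySet()) {
    final HTableInterfaceReference tableRef = entry.getKey();
    final List<Mutation> batch = new ArrayList<Mutation>(entry.getValue());
    exec.submit(new Callable<Void>() {
        @Override
        public Void call() throws Exception {
            HTableInterface target = resolveTable(tableRef); // hypothetical helper
            target.batch(batch); // write the whole batch for this table
            return null;
        }
    });
}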

Example 54 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

The class IndexMaintainerTest, method testIndexRowKeyBuilding.

private void testIndexRowKeyBuilding(String schemaName, String tableName, String dataColumns, String pk, String indexColumns, Object[] values, String includeColumns, String dataProps, String indexProps, KeyValueBuilder builder) throws Exception {
    Connection conn = DriverManager.getConnection(getUrl());
    String fullTableName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(schemaName), SchemaUtil.normalizeIdentifier(tableName));
    String fullIndexName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(schemaName), SchemaUtil.normalizeIdentifier("idx"));
    conn.createStatement().execute("CREATE TABLE " + fullTableName + "(" + dataColumns + " CONSTRAINT pk PRIMARY KEY (" + pk + "))  " + (dataProps.isEmpty() ? "" : dataProps));
    try {
        conn.createStatement().execute("CREATE INDEX idx ON " + fullTableName + "(" + indexColumns + ") " + (includeColumns.isEmpty() ? "" : "INCLUDE (" + includeColumns + ") ") + (indexProps.isEmpty() ? "" : indexProps));
        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
        PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), fullTableName));
        PTable index = pconn.getTable(new PTableKey(pconn.getTenantId(), fullIndexName));
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        table.getIndexMaintainers(ptr, pconn);
        List<IndexMaintainer> c1 = IndexMaintainer.deserialize(ptr, builder, true);
        assertEquals(1, c1.size());
        IndexMaintainer im1 = c1.get(0);
        StringBuilder buf = new StringBuilder("UPSERT INTO " + fullTableName + " VALUES(");
        for (int i = 0; i < values.length; i++) {
            buf.append("?,");
        }
        buf.setCharAt(buf.length() - 1, ')');
        PreparedStatement stmt = conn.prepareStatement(buf.toString());
        for (int i = 0; i < values.length; i++) {
            stmt.setObject(i + 1, values[i]);
        }
        stmt.execute();
        Iterator<Pair<byte[], List<KeyValue>>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn);
        List<KeyValue> dataKeyValues = iterator.next().getSecond();
        Map<ColumnReference, byte[]> valueMap = Maps.newHashMapWithExpectedSize(dataKeyValues.size());
        byte[] row = dataKeyValues.get(0).getRow();
        ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable(row);
        Put dataMutation = new Put(rowKeyPtr.copyBytes());
        for (KeyValue kv : dataKeyValues) {
            valueMap.put(new ColumnReference(kv.getFamily(), kv.getQualifier()), kv.getValue());
            dataMutation.add(kv);
        }
        ValueGetter valueGetter = newValueGetter(row, valueMap);
        List<Mutation> indexMutations = IndexTestUtil.generateIndexData(index, table, dataMutation, ptr, builder);
        assertEquals(1, indexMutations.size());
        assertTrue(indexMutations.get(0) instanceof Put);
        Mutation indexMutation = indexMutations.get(0);
        ImmutableBytesWritable indexKeyPtr = new ImmutableBytesWritable(indexMutation.getRow());
        ptr.set(rowKeyPtr.get(), rowKeyPtr.getOffset(), rowKeyPtr.getLength());
        byte[] mutableIndexRowKey = im1.buildRowKey(valueGetter, ptr, null, null);
        byte[] immutableIndexRowKey = indexKeyPtr.copyBytes();
        assertArrayEquals(immutableIndexRowKey, mutableIndexRowKey);
        // every covered column should resolve to a value from the data row
        for (ColumnReference ref : im1.getCoveredColumns()) {
            valueMap.get(ref);
        }
        byte[] dataRowKey = im1.buildDataRowKey(indexKeyPtr, null);
        assertArrayEquals(dataRowKey, dataKeyValues.get(0).getRow());
    } finally {
        try {
            conn.createStatement().execute("DROP TABLE " + fullTableName);
        } finally {
            conn.close();
        }
    }
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) KeyValue(org.apache.hadoop.hbase.KeyValue) Connection(java.sql.Connection) PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) PreparedStatement(java.sql.PreparedStatement) PTable(org.apache.phoenix.schema.PTable) Put(org.apache.hadoop.hbase.client.Put) ValueGetter(org.apache.phoenix.hbase.index.ValueGetter) Mutation(org.apache.hadoop.hbase.client.Mutation) PTableKey(org.apache.phoenix.schema.PTableKey) Pair(org.apache.hadoop.hbase.util.Pair) ColumnReference(org.apache.phoenix.hbase.index.covered.update.ColumnReference)
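
One small idiom in the snippet deserves a note: the UPSERT placeholder list is built by appending "?," once per bind value and then overwriting the trailing comma with the closing parenthesis, avoiding a separate trim step. In isolation (the table name here is hypothetical):

StringBuilder sql = new StringBuilder("UPSERT INTO T VALUES(");
for (int i = 0; i < 3; i++) {
    sql.append("?,"); // one placeholder per bind value
}
// overwrite the final ',' with ')' rather than appending and trimming
sql.setCharAt(sql.length() - 1, ')');
// sql.toString() -> "UPSERT INTO T VALUES(?,?,?)"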

Example 55 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

The class TestIndexUpdateManager, method testCancelingUpdates.

/**
   * When making updates we need to cancel out {@link Delete} and {@link Put}s for the same row.
   * @throws Exception on failure
   */
@Test
public void testCancelingUpdates() throws Exception {
    IndexUpdateManager manager = new IndexUpdateManager(mockIndexMetaData);
    long ts1 = 10, ts2 = 11;
    // at different timestamps, so both should be retained
    Delete d = new Delete(row, ts1);
    Put p = new Put(row, ts2);
    manager.addIndexUpdate(table, d);
    manager.addIndexUpdate(table, p);
    List<Mutation> pending = new ArrayList<Mutation>();
    pending.add(p);
    pending.add(d);
    validate(manager, pending);
    // add a delete that should cancel out the put, leading to only one delete remaining
    Delete d2 = new Delete(row, ts2);
    manager.addIndexUpdate(table, d2);
    pending.add(d);
    validate(manager, pending);
    // double-deletes of the same row only retain the existing one, which was already canceled out
    // above
    Delete d3 = new Delete(row, ts2);
    manager.addIndexUpdate(table, d3);
    pending.add(d);
    validate(manager, pending);
    // if there is just a put and a delete at the same ts, no pending updates should be returned
    manager = new IndexUpdateManager(mockIndexMetaData);
    manager.addIndexUpdate(table, d2);
    manager.addIndexUpdate(table, p);
    validate(manager, Collections.<Mutation>emptyList());
    // different row insertions can be tricky too, if you don't get the base cases right
    manager = new IndexUpdateManager(mockIndexMetaData);
    manager.addIndexUpdate(table, p);
    // this row definitely sorts after the current row
    byte[] row1 = Bytes.toBytes("row1");
    Put p1 = new Put(row1, ts1);
    manager.addIndexUpdate(table, p1);
    // this delete should completely cover the given put and both should be removed
    Delete d4 = new Delete(row1, ts1);
    manager.addIndexUpdate(table, d4);
    pending.clear();
    pending.add(p);
    validate(manager, pending);
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) IndexUpdateManager(org.apache.phoenix.hbase.index.covered.update.IndexUpdateManager) ArrayList(java.util.ArrayList) Mutation(org.apache.hadoop.hbase.client.Mutation) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)
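
The cancellation rule the test exercises, reduced to its smallest case (reusing the same table, row, mockIndexMetaData, and validate helper as the test above): a Put and a Delete for the same row at the same timestamp cancel each other, so the manager emits nothing.

IndexUpdateManager mgr = new IndexUpdateManager(mockIndexMetaData);
mgr.addIndexUpdate(table, new Put(row, 10L)); // Put at ts=10
mgr.addIndexUpdate(table, new Delete(row, 10L)); // Delete at ts=10 covers the Put
// both updates cancel out, so no pending mutations remain
validate(mgr, Collections.<Mutation>emptyList());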

Aggregations

Mutation (org.apache.hadoop.hbase.client.Mutation): 139 uses
Put (org.apache.hadoop.hbase.client.Put): 53 uses
ArrayList (java.util.ArrayList): 46 uses
IOException (java.io.IOException): 35 uses
Delete (org.apache.hadoop.hbase.client.Delete): 32 uses
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 31 uses
List (java.util.List): 28 uses
Cell (org.apache.hadoop.hbase.Cell): 25 uses
Pair (org.apache.hadoop.hbase.util.Pair): 23 uses
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 23 uses
HashMap (java.util.HashMap): 19 uses
PTable (org.apache.phoenix.schema.PTable): 18 uses
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 17 uses
MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse): 15 uses
Region (org.apache.hadoop.hbase.regionserver.Region): 14 uses
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 14 uses
Test (org.junit.Test): 14 uses
MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode): 13 uses
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 12 uses
MutationProto (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto): 12 uses