Example 11 with RegionCoprocessorEnvironment

use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project phoenix by apache.

the class TrackingParallelWriterIndexCommitter method write.

@Override
public void write(Multimap<HTableInterfaceReference, Mutation> toWrite, final boolean allowLocalUpdates) throws MultiIndexWriteFailureException {
    Set<Entry<HTableInterfaceReference, Collection<Mutation>>> entries = toWrite.asMap().entrySet();
    TaskBatch<Boolean> tasks = new TaskBatch<Boolean>(entries.size());
    List<HTableInterfaceReference> tables = new ArrayList<HTableInterfaceReference>(entries.size());
    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
        // get the mutations for each table. We leak the implementation here a little bit to save
        // doing a complete copy of all the index updates for each table.
        final List<Mutation> mutations = (List<Mutation>) entry.getValue();
        // track each reference so we can get at it easily later, when determining failures
        final HTableInterfaceReference tableReference = entry.getKey();
        final RegionCoprocessorEnvironment env = this.env;
        if (env != null && !allowLocalUpdates && tableReference.getTableName().equals(env.getRegion().getTableDesc().getNameAsString())) {
            continue;
        }
        tables.add(tableReference);
        /*
             * Write a batch of index updates to an index table. This operation stops (is cancelable) via two
             * mechanisms: (1) setting aborted or stopped on the IndexWriter or, (2) interrupting the running thread.
             * The former will only work if we are not in the midst of writing the current batch to the table, though we
             * do check these status variables before starting and before writing the batch. The latter usage,
             * interrupting the thread, will work in the previous situations as well as at some points while writing the
             * batch, depending on the underlying writer implementation (HTableInterface#batch is blocking, but doesn't
             * specify when it supports an interrupt).
             */
        tasks.add(new Task<Boolean>() {

            /**
                 * Do the actual write to the index table. We don't need to worry about closing the table because that
                 * is handled by the {@link CachingHTableFactory}.
                 */
            @SuppressWarnings("deprecation")
            @Override
            public Boolean call() throws Exception {
                HTableInterface table = null;
                try {
                    // this may have been queued, but there was an abort/stop so we try to early exit
                    throwFailureIfDone();
                    if (allowLocalUpdates && env != null && tableReference.getTableName().equals(env.getRegion().getTableDesc().getNameAsString())) {
                        try {
                            throwFailureIfDone();
                            IndexUtil.writeLocalUpdates(env.getRegion(), mutations, true);
                            return Boolean.TRUE;
                        } catch (IOException ignored) {
                            // if the local write fails, we fall back to the standard (slower) path
                            if (LOG.isTraceEnabled()) {
                                LOG.trace("indexRegion.batchMutate failed; falling back to HTable.batch(). Got error=" + ignored);
                            }
                        }
                    }
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Writing index update:" + mutations + " to table: " + tableReference);
                    }
                    table = factory.getTable(tableReference.get());
                    throwFailureIfDone();
                    table.batch(mutations);
                } catch (InterruptedException e) {
                    // reset the interrupt status on the thread
                    Thread.currentThread().interrupt();
                    throw e;
                } finally {
                    if (table != null) {
                        table.close();
                    }
                }
                return Boolean.TRUE;
            }

            private void throwFailureIfDone() throws SingleIndexWriteFailureException {
                if (stopped.isStopped() || abortable.isAborted() || Thread.currentThread().isInterrupted()) {
                    throw new SingleIndexWriteFailureException("Pool closed, not attempting to write to the index!", null);
                }
            }
        });
    }
    List<Boolean> results = null;
    try {
        LOG.debug("Waiting on index update tasks to complete...");
        results = this.pool.submitUninterruptible(tasks);
    } catch (ExecutionException e) {
        throw new RuntimeException("Should not fail on the results while using a WaitForCompletionTaskRunner", e);
    } catch (EarlyExitFailure e) {
        throw new RuntimeException("Stopped while waiting for batch, quiting!", e);
    }
    // track the failures. We only ever access this on return from our calls, so no extra
    // synchronization is needed. We could update all the failures as we find them, but that would
    // add a lot of locking overhead, and just doing the copy later is about as efficient.
    List<HTableInterfaceReference> failures = new ArrayList<HTableInterfaceReference>();
    int index = 0;
    for (Boolean result : results) {
        // there was a failure
        if (result == null) {
            // we know which table failed by the index of the result
            failures.add(tables.get(index));
        }
        index++;
    }
    // if any of the tasks failed, then we need to propagate the failure
    if (failures.size() > 0) {
        // make the list unmodifiable to avoid any more synchronization concerns
        throw new MultiIndexWriteFailureException(Collections.unmodifiableList(failures));
    }
}
Also used : ArrayList(java.util.ArrayList) TaskBatch(org.apache.phoenix.hbase.index.parallel.TaskBatch) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) Entry(java.util.Map.Entry) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) List(java.util.List) ExecutionException(java.util.concurrent.ExecutionException) EarlyExitFailure(org.apache.phoenix.hbase.index.parallel.EarlyExitFailure) IOException(java.io.IOException) MultiIndexWriteFailureException(org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException) SingleIndexWriteFailureException(org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException) HTableInterfaceReference(org.apache.phoenix.hbase.index.table.HTableInterfaceReference) Collection(java.util.Collection) Mutation(org.apache.hadoop.hbase.client.Mutation)
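
The cancellation contract described in the block comment above is worth isolating. Below is a minimal, self-contained sketch of the same two-pronged pattern in plain java.util.concurrent terms: a shared status flag checked before each blocking step, plus thread interruption for work already in flight. All names here (CancellableWriteSketch, throwIfDone) are invented for illustration and are not Phoenix APIs.

import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;

public class CancellableWriteSketch {

    private static final AtomicBoolean stopped = new AtomicBoolean(false);

    // mirrors throwFailureIfDone(): bail out if the pool was stopped or we were interrupted
    private static void throwIfDone() {
        if (stopped.get() || Thread.currentThread().isInterrupted()) {
            throw new CancellationException("Pool closed, not attempting to write");
        }
    }

    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(1);
        Future<Boolean> write = pool.submit(() -> {
            throwIfDone();           // the task may have been queued past an abort
            Thread.sleep(5000);      // stand-in for the blocking HTable batch call
            throwIfDone();           // re-check before treating the write as done
            return Boolean.TRUE;
        });
        stopped.set(true);           // mechanism (1): flip the shared status flag
        write.cancel(true);          // mechanism (2): interrupt the running thread
        pool.shutdown();
        System.out.println("write cancelled: " + write.isCancelled());
    }
}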

Example 12 with RegionCoprocessorEnvironment

use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project phoenix by apache.

the class TestIndexWriter method testFailureOnRunningUpdateAbortsPending.

/**
   * Index updates can potentially be queued up if there aren't enough writer threads. If a running
   * index write fails, then we should exit early from any pending index update when it comes up (if
   * the pool isn't already shut down).
   * <p>
   * This test is a little bit racy - we could actually have the failure of the first task before
   * the third task is even submitted. However, we should never see the third task attempt to make
   * the batch write, so we should never see a failure here.
   * @throws Exception on failure
   */
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testFailureOnRunningUpdateAbortsPending() throws Exception {
    Abortable abort = new StubAbortable();
    Stoppable stop = Mockito.mock(Stoppable.class);
    // enough threads that the failing write and the blocking write can run concurrently
    ExecutorService exec = Executors.newFixedThreadPool(3);
    Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
    FakeTableFactory factory = new FakeTableFactory(tables);
    // updates to two different tables
    byte[] tableName = Bytes.add(this.testName.getTableName(), new byte[] { 1, 2, 3, 4 });
    Put m = new Put(row);
    m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
    // this will sort before the first tablename (it's a strict prefix of it)
    byte[] tableName2 = this.testName.getTableName();
    List<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>();
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName));
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName2));
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName2));
    // first table will fail
    HTableInterface table = Mockito.mock(HTableInterface.class);
    Mockito.when(table.batch(Mockito.anyList())).thenThrow(new IOException("Intentional IOException for failed first write."));
    Mockito.when(table.getTableName()).thenReturn(tableName);
    RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration();
    Mockito.when(e.getConfiguration()).thenReturn(conf);
    Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
    // second table just blocks to make sure that the abort propagates to the third task
    final CountDownLatch waitOnAbortedLatch = new CountDownLatch(1);
    final boolean[] failed = new boolean[] { false };
    HTableInterface table2 = Mockito.mock(HTableInterface.class);
    Mockito.when(table2.getTableName()).thenReturn(tableName2);
    Mockito.when(table2.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            waitOnAbortedLatch.await();
            return null;
        }
    }).thenAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            failed[0] = true;
            throw new RuntimeException("Unexpected exception - second index table shouldn't have been written to");
        }
    });
    // add the tables to the set of tables, so they're returned to the writer
    tables.put(new ImmutableBytesPtr(tableName), table);
    tables.put(new ImmutableBytesPtr(tableName2), table2);
    ParallelWriterIndexCommitter committer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
    committer.setup(factory, exec, abort, stop, 2, e);
    KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy();
    policy.setup(stop, abort);
    IndexWriter writer = new IndexWriter(committer, policy);
    try {
        writer.write(indexUpdates);
        fail("Should not have successfully completed all index writes");
    } catch (SingleIndexWriteFailureException s) {
        LOG.info("Correctly got a failure to reach the index", s);
        // we correctly got the abort, so let the next task execute
        waitOnAbortedLatch.countDown();
    }
    assertFalse("Third set of index writes should never have been attempted - the task should have seen the abort before writing!", failed[0]);
    writer.stop(this.testName.getTableNameString() + " finished");
    assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
    assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
}
Also used : StubAbortable(org.apache.phoenix.hbase.index.StubAbortable) Configuration(org.apache.hadoop.conf.Configuration) HashMap(java.util.HashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ArrayList(java.util.ArrayList) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) Abortable(org.apache.hadoop.hbase.Abortable) Pair(org.apache.hadoop.hbase.util.Pair) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Stoppable(org.apache.hadoop.hbase.Stoppable) IOException(java.io.IOException) CountDownLatch(java.util.concurrent.CountDownLatch) Put(org.apache.hadoop.hbase.client.Put) Answer(org.mockito.stubbing.Answer) InvocationOnMock(org.mockito.invocation.InvocationOnMock) SingleIndexWriteFailureException(org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException) ExecutorService(java.util.concurrent.ExecutorService) Mutation(org.apache.hadoop.hbase.client.Mutation) Test(org.junit.Test)
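
Stripped of the Phoenix machinery, the behavior this test pins down is plain executor fail-fast: a failed task trips a shared flag, and a task still waiting in the queue checks that flag and exits before doing any work. A stand-alone sketch of that idea (all names invented for illustration, not Phoenix types):

import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicBoolean;

public class FailFastSketch {
    public static void main(String[] args) throws Exception {
        // a single thread forces the second task to queue behind the first
        ExecutorService exec = Executors.newSingleThreadExecutor();
        AtomicBoolean aborted = new AtomicBoolean(false);

        Future<?> failing = exec.submit(() -> {
            aborted.set(true);                   // simulate the failed index write
            throw new RuntimeException("Intentional failure for the first write");
        });
        Future<?> queued = exec.submit(() -> {
            if (aborted.get()) {                 // early exit: never touch the "table"
                throw new CancellationException("aborted before write");
            }
            System.out.println("should never be reached");
        });

        try { failing.get(); } catch (ExecutionException expected) { /* first write failed */ }
        try { queued.get(); } catch (ExecutionException e) {
            System.out.println("queued task exited early: " + e.getCause().getMessage());
        }
        exec.shutdown();
    }
}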

Example 13 with RegionCoprocessorEnvironment

use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project phoenix by apache.

the class TestParalleWriterIndexCommitter method testCorrectlyCleansUpResources.

@Test
public void testCorrectlyCleansUpResources() throws Exception {
    ExecutorService exec = Executors.newFixedThreadPool(1);
    FakeTableFactory factory = new FakeTableFactory(Collections.<ImmutableBytesPtr, HTableInterface>emptyMap());
    ParallelWriterIndexCommitter writer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
    Abortable mockAbort = Mockito.mock(Abortable.class);
    Stoppable mockStop = Mockito.mock(Stoppable.class);
    RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration();
    Mockito.when(e.getConfiguration()).thenReturn(conf);
    Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
    // create a simple writer
    writer.setup(factory, exec, mockAbort, mockStop, 1, e);
    // stop the writer
    writer.stop(this.test.getTableNameString() + " finished");
    assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
    assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
    Mockito.verifyZeroInteractions(mockAbort, mockStop);
}
Also used : RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) Configuration(org.apache.hadoop.conf.Configuration) Abortable(org.apache.hadoop.hbase.Abortable) StubAbortable(org.apache.phoenix.hbase.index.StubAbortable) ExecutorService(java.util.concurrent.ExecutorService) Stoppable(org.apache.hadoop.hbase.Stoppable) Test(org.junit.Test)
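
The resource-ownership rule this test enforces reduces to plain executor lifecycle semantics: the committer takes ownership of the pool passed to setup(), so stop() must leave that pool shut down while never touching the abort/stop collaborators. A small illustrative sketch (the Writer class below is invented, not the Phoenix type):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ShutdownContractSketch {

    // hypothetical component that takes ownership of the pool it is given
    static class Writer {
        private final ExecutorService pool;
        Writer(ExecutorService pool) { this.pool = pool; }
        void stop(String why) { pool.shutdownNow(); }  // what writer.stop() amounts to
    }

    public static void main(String[] args) {
        ExecutorService exec = Executors.newFixedThreadPool(1);
        Writer writer = new Writer(exec);
        writer.stop("test finished");
        System.out.println("pool shut down: " + exec.isShutdown()); // expect true
    }
}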

Example 14 with RegionCoprocessorEnvironment

use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project phoenix by apache.

the class TestParalleIndexWriter method testSynchronouslyCompletesAllWrites.

@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testSynchronouslyCompletesAllWrites() throws Exception {
    LOG.info("Starting " + test.getTableNameString());
    LOG.info("Current thread is interrupted: " + Thread.interrupted());
    Abortable abort = new StubAbortable();
    Stoppable stop = Mockito.mock(Stoppable.class);
    ExecutorService exec = Executors.newFixedThreadPool(1);
    Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
    FakeTableFactory factory = new FakeTableFactory(tables);
    RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration();
    Mockito.when(e.getConfiguration()).thenReturn(conf);
    Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
    ImmutableBytesPtr tableName = new ImmutableBytesPtr(this.test.getTableName());
    Put m = new Put(row);
    m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
    Multimap<HTableInterfaceReference, Mutation> indexUpdates = ArrayListMultimap.<HTableInterfaceReference, Mutation>create();
    indexUpdates.put(new HTableInterfaceReference(tableName), m);
    HTableInterface table = Mockito.mock(HTableInterface.class);
    final boolean[] completed = new boolean[] { false };
    Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            // just keep track that it was called
            completed[0] = true;
            return null;
        }
    });
    Mockito.when(table.getTableName()).thenReturn(test.getTableName());
    // add the table to the set of tables, so it's returned to the writer
    tables.put(tableName, table);
    // setup the writer and failure policy
    ParallelWriterIndexCommitter writer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
    writer.setup(factory, exec, abort, stop, 1, e);
    writer.write(indexUpdates, true);
    assertTrue("Writer returned before the table batch completed! Likely a race condition tripped", completed[0]);
    writer.stop(this.test.getTableNameString() + " finished");
    assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
    assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
}
Also used : StubAbortable(org.apache.phoenix.hbase.index.StubAbortable) Configuration(org.apache.hadoop.conf.Configuration) HashMap(java.util.HashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) Abortable(org.apache.hadoop.hbase.Abortable) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Stoppable(org.apache.hadoop.hbase.Stoppable) Put(org.apache.hadoop.hbase.client.Put) InvocationOnMock(org.mockito.invocation.InvocationOnMock) HTableInterfaceReference(org.apache.phoenix.hbase.index.table.HTableInterfaceReference) ExecutorService(java.util.concurrent.ExecutorService) Mutation(org.apache.hadoop.hbase.client.Mutation) Test(org.junit.Test)
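
These tests observe side effects through Mockito's Answer callbacks rather than stubbed return values. A minimal sketch of the same stubbing idiom, assuming Mockito is on the classpath; the BatchWriter interface is a hypothetical stand-in for HTableInterface, not a real HBase type:

import static org.mockito.Mockito.anyList;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

import java.util.Collections;
import java.util.List;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

public class AnswerSketch {

    // hypothetical stand-in for HTableInterface#batch
    interface BatchWriter {
        void batch(List<?> mutations) throws Exception;
    }

    public static void main(String[] args) throws Exception {
        final boolean[] completed = new boolean[] { false };
        BatchWriter writer = mock(BatchWriter.class);
        // run a callback when batch() is invoked, instead of stubbing a return value
        doAnswer(new Answer<Void>() {
            @Override
            public Void answer(InvocationOnMock invocation) {
                completed[0] = true;   // just record that the write happened
                return null;
            }
        }).when(writer).batch(anyList());

        writer.batch(Collections.emptyList());
        if (!completed[0]) {
            throw new AssertionError("batch() was never invoked");
        }
        System.out.println("batch completed synchronously: " + completed[0]);
    }
}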

Example 15 with RegionCoprocessorEnvironment

use of org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment in project phoenix by apache.

the class TestIndexWriter method testShutdownInterruptsAsExpected.

/**
   * Test that if we get an interruption to the thread while doing a batch (e.g. via shutdown),
   * we correctly end the task
   * @throws Exception on failure
   */
@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testShutdownInterruptsAsExpected() throws Exception {
    Stoppable stop = Mockito.mock(Stoppable.class);
    Abortable abort = new StubAbortable();
    // single thread factory so the older request gets queued
    ExecutorService exec = Executors.newFixedThreadPool(1);
    Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
    RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration();
    Mockito.when(e.getConfiguration()).thenReturn(conf);
    Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
    FakeTableFactory factory = new FakeTableFactory(tables);
    byte[] tableName = this.testName.getTableName();
    HTableInterface table = Mockito.mock(HTableInterface.class);
    Mockito.when(table.getTableName()).thenReturn(tableName);
    final CountDownLatch writeStartedLatch = new CountDownLatch(1);
    // latch never gets counted down, so we wait forever
    final CountDownLatch waitOnAbortedLatch = new CountDownLatch(1);
    Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            LOG.info("Write started");
            writeStartedLatch.countDown();
            // when we interrupt the thread for shutdown, we should see this throw an interrupt too
            try {
                waitOnAbortedLatch.await();
            } catch (InterruptedException e) {
                LOG.info("Correctly interrupted while writing!");
                throw e;
            }
            return null;
        }
    });
    // add the table to the set of tables, so it's returned to the writer
    tables.put(new ImmutableBytesPtr(tableName), table);
    // update a single table
    Put m = new Put(row);
    m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
    final List<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>();
    indexUpdates.add(new Pair<Mutation, byte[]>(m, tableName));
    // setup the writer
    ParallelWriterIndexCommitter committer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
    committer.setup(factory, exec, abort, stop, 2, e);
    KillServerOnFailurePolicy policy = new KillServerOnFailurePolicy();
    policy.setup(stop, abort);
    final IndexWriter writer = new IndexWriter(committer, policy);
    final boolean[] failedWrite = new boolean[] { false };
    Thread primaryWriter = new Thread() {

        @Override
        public void run() {
            try {
                writer.write(indexUpdates);
            } catch (IndexWriteException e) {
                failedWrite[0] = true;
            }
        }
    };
    primaryWriter.start();
    // wait for the write to start before intentionally shutting down the pool
    writeStartedLatch.await();
    writer.stop("Shutting down writer for test " + this.testName.getTableNameString());
    primaryWriter.join();
    assertTrue("Writer should have failed because of the stop we issued", failedWrite[0]);
}
Also used : StubAbortable(org.apache.phoenix.hbase.index.StubAbortable) Configuration(org.apache.hadoop.conf.Configuration) HashMap(java.util.HashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ArrayList(java.util.ArrayList) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) Abortable(org.apache.hadoop.hbase.Abortable) Pair(org.apache.hadoop.hbase.util.Pair) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Stoppable(org.apache.hadoop.hbase.Stoppable) CountDownLatch(java.util.concurrent.CountDownLatch) Put(org.apache.hadoop.hbase.client.Put) IndexWriteException(org.apache.phoenix.hbase.index.exception.IndexWriteException) InvocationOnMock(org.mockito.invocation.InvocationOnMock) ExecutorService(java.util.concurrent.ExecutorService) Mutation(org.apache.hadoop.hbase.client.Mutation) Test(org.junit.Test)
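
The interrupt behavior under test reduces to a plain-Java fact: a thread blocked on CountDownLatch#await throws InterruptedException when interrupted. A self-contained sketch, with no Phoenix types, of the same shutdown-interrupts-writer sequence:

import java.util.concurrent.CountDownLatch;

public class InterruptSketch {
    public static void main(String[] args) throws Exception {
        final CountDownLatch started = new CountDownLatch(1);
        final CountDownLatch never = new CountDownLatch(1);   // never counted down
        final boolean[] interrupted = new boolean[] { false };

        Thread worker = new Thread(() -> {
            started.countDown();
            try {
                never.await();               // stand-in for a blocking batch() call
            } catch (InterruptedException e) {
                interrupted[0] = true;       // correctly interrupted while "writing"
            }
        });
        worker.start();
        started.await();                     // make sure the "write" has begun
        worker.interrupt();                  // simulate the pool's shutdownNow()
        worker.join();
        System.out.println("worker interrupted: " + interrupted[0]);
    }
}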

Aggregations

RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment): 78 uses
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 37 uses
Configuration (org.apache.hadoop.conf.Configuration): 25 uses
CConfigurationReader (co.cask.cdap.data2.transaction.queue.hbase.coprocessor.CConfigurationReader): 21 uses
Test (org.junit.Test): 16 uses
TopicMetadataCacheSupplier (co.cask.cdap.messaging.TopicMetadataCacheSupplier): 14 uses
Put (org.apache.hadoop.hbase.client.Put): 14 uses
Region (org.apache.hadoop.hbase.regionserver.Region): 14 uses
DefaultScanBuilder (co.cask.cdap.data2.util.hbase.DefaultScanBuilder): 11 uses
Mutation (org.apache.hadoop.hbase.client.Mutation): 9 uses
InvocationOnMock (org.mockito.invocation.InvocationOnMock): 9 uses
Cell (org.apache.hadoop.hbase.Cell): 8 uses
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 8 uses
HColumnDescriptor (org.apache.hadoop.hbase.HColumnDescriptor): 8 uses
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 8 uses
AccessDeniedException (org.apache.hadoop.hbase.security.AccessDeniedException): 8 uses
User (org.apache.hadoop.hbase.security.User): 8 uses
CConfiguration (co.cask.cdap.common.conf.CConfiguration): 7 uses
IncrementHandlerState (co.cask.cdap.data2.increment.hbase.IncrementHandlerState): 7 uses
CConfigurationCacheSupplier (co.cask.cdap.data2.transaction.coprocessor.CConfigurationCacheSupplier): 7 uses