Example 16 with HTableInterfaceReference

Use of org.apache.phoenix.hbase.index.table.HTableInterfaceReference in project phoenix by Apache.

From the class TestPerRegionIndexWriteCache, method testMultipleRegions.

@Test
public void testMultipleRegions() {
    PerRegionIndexWriteCache cache = new PerRegionIndexWriteCache();
    HTableInterfaceReference t1 = new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes("t1")));
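    // p, p2 (Puts) and r1, r2 (Regions) are fixtures defined elsewhere in this test class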
    List<Mutation> mutations = Lists.<Mutation>newArrayList(p);
    List<Mutation> m2 = Lists.<Mutation>newArrayList(p2);
    // add each region
    cache.addEdits(r1, t1, mutations);
    cache.addEdits(r2, t1, m2);
    // check region1
    Multimap<HTableInterfaceReference, Mutation> edits = cache.getEdits(r1);
    Set<Entry<HTableInterfaceReference, Collection<Mutation>>> entries = edits.asMap().entrySet();
    assertEquals("Got more than one table in the the edit map!", 1, entries.size());
    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
        // ensure that we are still storing a list here - otherwise it breaks the parallel writer
        // implementation
        final List<Mutation> stored = (List<Mutation>) entry.getValue();
        assertEquals("Got an unexpected amount of mutations in the entry for region1", 1, stored.size());
        assertEquals("Got an unexpected mutation in the entry for region2", p, stored.get(0));
    }
    // check region2
    edits = cache.getEdits(r2);
    entries = edits.asMap().entrySet();
    assertEquals("Got more than one table in the the edit map!", 1, entries.size());
    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
        // ensure that we are still storing a list here - otherwise it breaks the parallel writer
        // implementation
        final List<Mutation> stored = (List<Mutation>) entry.getValue();
        assertEquals("Got an unexpected amount of mutations in the entry for region2", 1, stored.size());
        assertEquals("Got an unexpected mutation in the entry for region2", p2, stored.get(0));
    }
    // ensure that a second get doesn't have any more edits. This ensures that we don't keep
    // references around to these edits and have a memory leak
    assertNull("Got an entry for a region we removed", cache.getEdits(r1));
}
Also used : Entry(java.util.Map.Entry) HTableInterfaceReference(org.apache.phoenix.hbase.index.table.HTableInterfaceReference) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Collection(java.util.Collection) ArrayList(java.util.ArrayList) List(java.util.List) Mutation(org.apache.hadoop.hbase.client.Mutation) Test(org.junit.Test)
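
A note on the behavior the final assertNull depends on: getEdits hands back the region's pending edits and drops them from the cache, so a repeated call returns null. Below is a minimal sketch of those semantics, assuming generic keys in place of HBase Regions and Guava's ArrayListMultimap for the per-table lists; PerKeyEditCache is a hypothetical stand-in, not Phoenix's actual PerRegionIndexWriteCache.

import java.util.HashMap;
import java.util.Map;

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

// Hypothetical stand-in for PerRegionIndexWriteCache, keyed by an arbitrary
// region key R, table key T, and mutation type M.
public class PerKeyEditCache<R, T, M> {

    private final Map<R, Multimap<T, M>> cache = new HashMap<>();

    // Append edits for the (region, table) pair. ArrayListMultimap backs each
    // key's values with an ArrayList, which is what lets the tests above cast
    // the Collection views to List<Mutation>.
    public void addEdits(R region, T table, Iterable<M> edits) {
        Multimap<T, M> perRegion =
                cache.computeIfAbsent(region, r -> ArrayListMultimap.<T, M>create());
        for (M edit : edits) {
            perRegion.put(table, edit);
        }
    }

    // Remove and return the pending edits for the region, or null if none are
    // cached; draining on read is what the final assertNull above verifies.
    public Multimap<T, M> getEdits(R region) {
        return cache.remove(region);
    }
}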

Example 17 with HTableInterfaceReference

Use of org.apache.phoenix.hbase.index.table.HTableInterfaceReference in project phoenix by Apache.

From the class TestPerRegionIndexWriteCache, method testAddRemoveSingleRegion.

@Test
public void testAddRemoveSingleRegion() {
    PerRegionIndexWriteCache cache = new PerRegionIndexWriteCache();
    HTableInterfaceReference t1 = new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes("t1")));
    List<Mutation> mutations = new ArrayList<Mutation>();
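    // p (a Put) and r1 (a Region) are fixtures defined elsewhere in this test class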
    mutations.add(p);
    cache.addEdits(r1, t1, mutations);
    Multimap<HTableInterfaceReference, Mutation> edits = cache.getEdits(r1);
    Set<Entry<HTableInterfaceReference, Collection<Mutation>>> entries = edits.asMap().entrySet();
    assertEquals("Got more than one table in the the edit map!", 1, entries.size());
    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
        // ensure that we are still storing a list here - otherwise it breaks the parallel writer implementation
        final List<Mutation> stored = (List<Mutation>) entry.getValue();
        assertEquals("Got an unexpected amount of mutations in the entry", 1, stored.size());
        assertEquals("Got an unexpected mutation in the entry", p, stored.get(0));
    }
    // ensure that a second get doesn't have any more edits. This ensures that we don't keep
    // references around to these edits and have a memory leak
    assertNull("Got an entry for a region we removed", cache.getEdits(r1));
}
Also used : Entry(java.util.Map.Entry) HTableInterfaceReference(org.apache.phoenix.hbase.index.table.HTableInterfaceReference) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ArrayList(java.util.ArrayList) Collection(java.util.Collection) List(java.util.List) Mutation(org.apache.hadoop.hbase.client.Mutation) Test(org.junit.Test)
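
The "still storing a list" comments and the (List<Mutation>) cast rely on a Guava property worth making explicit: for a ListMultimap such as ArrayListMultimap, the Collection values exposed by asMap() are the underlying Lists. A small self-contained demo, with illustrative names that are not from Phoenix:

import java.util.Collection;
import java.util.List;
import java.util.Map;

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

public class ListCastDemo {
    public static void main(String[] args) {
        Multimap<String, String> edits = ArrayListMultimap.create();
        edits.put("t1", "m1");
        edits.put("t1", "m2");
        for (Map.Entry<String, Collection<String>> entry : edits.asMap().entrySet()) {
            // Safe for a ListMultimap: the value collection is an ArrayList.
            List<String> stored = (List<String>) entry.getValue();
            System.out.println(entry.getKey() + " -> " + stored); // t1 -> [m1, m2]
        }
    }
}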

Example 18 with HTableInterfaceReference

Use of org.apache.phoenix.hbase.index.table.HTableInterfaceReference in project phoenix by Apache.

From the class TestParalleWriterIndexCommitter, method testSynchronouslyCompletesAllWrites.

@SuppressWarnings({ "unchecked" })
@Test
public void testSynchronouslyCompletesAllWrites() throws Exception {
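    // 'test' (a table-name rule) and 'row' (a byte[]) are fixtures defined elsewhere in this test class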
    LOGGER.info("Starting " + test.getTableNameString());
    LOGGER.info("Current thread is interrupted: " + Thread.interrupted());
    RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration();
    Mockito.when(e.getConfiguration()).thenReturn(conf);
    Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
    Region mockRegion = Mockito.mock(Region.class);
    Mockito.when(e.getRegion()).thenReturn(mockRegion);
    TableDescriptor mockTableDesc = Mockito.mock(TableDescriptor.class);
    Mockito.when(mockTableDesc.getTableName()).thenReturn(TableName.valueOf("test"));
    Connection mockConnection = Mockito.mock(Connection.class);
    Mockito.when(e.getConnection()).thenReturn(mockConnection);
    Mockito.when(mockRegion.getTableDescriptor()).thenReturn(mockTableDesc);
    Stoppable stop = Mockito.mock(Stoppable.class);
    ExecutorService exec = Executors.newFixedThreadPool(1);
    Map<ImmutableBytesPtr, Table> tables = new LinkedHashMap<ImmutableBytesPtr, Table>();
    FakeTableFactory factory = new FakeTableFactory(tables);
    ImmutableBytesPtr tableName = new ImmutableBytesPtr(this.test.getTableName());
    Put m = new Put(row);
    m.addColumn(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
    Multimap<HTableInterfaceReference, Mutation> indexUpdates = ArrayListMultimap.<HTableInterfaceReference, Mutation>create();
    indexUpdates.put(new HTableInterfaceReference(tableName), m);
    Table table = Mockito.mock(Table.class);
    final boolean[] completed = new boolean[] { false };
    Mockito.doAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            // just keep track that it was called
            completed[0] = true;
            return null;
        }
    }).when(table).batch(Mockito.anyList(), Mockito.any());
    Mockito.when(table.getName()).thenReturn(org.apache.hadoop.hbase.TableName.valueOf(test.getTableName()));
    // add the table to the set of tables, so its returned to the writer
    tables.put(tableName, table);
    // setup the writer and failure policy
    TrackingParallelWriterIndexCommitter writer = new TrackingParallelWriterIndexCommitter(VersionInfo.getVersion());
    writer.setup(factory, exec, stop, e);
    writer.write(indexUpdates, true, ScanUtil.UNKNOWN_CLIENT_VERSION);
    assertTrue("Writer returned before the table batch completed! Likely a race condition tripped", completed[0]);
    writer.stop(this.test.getTableNameString() + " finished");
    assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
    assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) LinkedHashMap(java.util.LinkedHashMap) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) Table(org.apache.hadoop.hbase.client.Table) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Connection(org.apache.hadoop.hbase.client.Connection) Stoppable(org.apache.hadoop.hbase.Stoppable) TableDescriptor(org.apache.hadoop.hbase.client.TableDescriptor) Put(org.apache.hadoop.hbase.client.Put) InvocationOnMock(org.mockito.invocation.InvocationOnMock) HTableInterfaceReference(org.apache.phoenix.hbase.index.table.HTableInterfaceReference) ExecutorService(java.util.concurrent.ExecutorService) Region(org.apache.hadoop.hbase.regionserver.Region) Mutation(org.apache.hadoop.hbase.client.Mutation) Test(org.junit.Test)
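
The core of this test is the completion-flag pattern: a one-element boolean array, mutable yet capturable by the Answer, records that the mocked batch call ran before write(...) returned, which is what proves the commit is synchronous. A stripped-down sketch of just that pattern, using a hypothetical Sink interface in place of HBase's Table:

import static org.junit.Assert.assertTrue;

import java.util.Collections;
import java.util.List;

import org.junit.Test;
import org.mockito.Mockito;

public class CompletionFlagSketch {

    // Hypothetical stand-in for the mocked Table#batch call.
    interface Sink {
        void batch(List<String> ops) throws Exception;
    }

    @Test
    public void flagIsSetBeforeCallerReturns() throws Exception {
        final boolean[] completed = new boolean[] { false };
        Sink sink = Mockito.mock(Sink.class);
        Mockito.doAnswer(invocation -> {
            // just keep track that it was called, as in the test above
            completed[0] = true;
            return null;
        }).when(sink).batch(Mockito.anyList());

        sink.batch(Collections.<String>emptyList());
        assertTrue("batch() did not complete before the caller returned", completed[0]);
    }
}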

Example 19 with HTableInterfaceReference

Use of org.apache.phoenix.hbase.index.table.HTableInterfaceReference in project phoenix by Apache.

From the class TestPerRegionIndexWriteCache, method testMultipleAddsForSingleRegion.

@Test
public void testMultipleAddsForSingleRegion() {
    PerRegionIndexWriteCache cache = new PerRegionIndexWriteCache();
    HTableInterfaceReference t1 = new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes("t1")));
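    // p, p2 (Puts) and r1 (a Region) are fixtures defined elsewhere in this test class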
    List<Mutation> mutations = Lists.<Mutation>newArrayList(p);
    cache.addEdits(r1, t1, mutations);
    // add a second set
    mutations = Lists.<Mutation>newArrayList(p2);
    cache.addEdits(r1, t1, mutations);
    Multimap<HTableInterfaceReference, Mutation> edits = cache.getEdits(r1);
    Set<Entry<HTableInterfaceReference, Collection<Mutation>>> entries = edits.asMap().entrySet();
    assertEquals("Got more than one table in the the edit map!", 1, entries.size());
    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
        // ensure that we are still storing a list here - otherwise it breaks the parallel writer
        // implementation
        final List<Mutation> stored = (List<Mutation>) entry.getValue();
        assertEquals("Got an unexpected amount of mutations in the entry", 2, stored.size());
        assertEquals("Got an unexpected mutation in the entry", p, stored.get(0));
        assertEquals("Got an unexpected mutation in the entry", p2, stored.get(1));
    }
}
Also used : Entry(java.util.Map.Entry) HTableInterfaceReference(org.apache.phoenix.hbase.index.table.HTableInterfaceReference) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Collection(java.util.Collection) ArrayList(java.util.ArrayList) List(java.util.List) Mutation(org.apache.hadoop.hbase.client.Mutation) Test(org.junit.Test)
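
Read together with Example 16, this test pins down the append behavior: a second addEdits for the same region and table extends the existing list rather than replacing it. Against the hypothetical PerKeyEditCache sketch from Example 16, the same sequence looks like this:

import java.util.Arrays;

public class MultipleAddsDemo {
    public static void main(String[] args) {
        PerKeyEditCache<String, String, String> cache = new PerKeyEditCache<>();
        cache.addEdits("r1", "t1", Arrays.asList("p"));
        cache.addEdits("r1", "t1", Arrays.asList("p2"));
        // Values accumulate in insertion order, matching the assertions above.
        System.out.println(cache.getEdits("r1")); // {t1=[p, p2]}
        System.out.println(cache.getEdits("r1")); // null: the first get drained it
    }
}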

Example 20 with HTableInterfaceReference

Use of org.apache.phoenix.hbase.index.table.HTableInterfaceReference in project phoenix by Apache.

From the class PhoenixIndexFailurePolicy, method incrementPendingDisableCounter.

private static void incrementPendingDisableCounter(IndexWriteException indexWriteException, PhoenixConnection conn) {
    try {
        Set<String> indexesToUpdate = new HashSet<>();
        if (indexWriteException instanceof MultiIndexWriteFailureException) {
            MultiIndexWriteFailureException indexException = (MultiIndexWriteFailureException) indexWriteException;
            List<HTableInterfaceReference> failedIndexes = indexException.getFailedTables();
            if (indexException.isDisableIndexOnFailure() && failedIndexes != null) {
                for (HTableInterfaceReference failedIndex : failedIndexes) {
                    String failedIndexTable = failedIndex.getTableName();
                    if (!indexesToUpdate.contains(failedIndexTable)) {
                        incrementCounterForIndex(conn, failedIndexTable);
                        indexesToUpdate.add(failedIndexTable);
                    }
                }
            }
        } else if (indexWriteException instanceof SingleIndexWriteFailureException) {
            SingleIndexWriteFailureException indexException = (SingleIndexWriteFailureException) indexWriteException;
            String failedIndex = indexException.getTableName();
            if (indexException.isDisableIndexOnFailure() && failedIndex != null) {
                incrementCounterForIndex(conn, failedIndex);
            }
        }
    } catch (Exception handleE) {
        LOGGER.warn("Error while trying to handle index write exception", indexWriteException);
    }
}
Also used : SingleIndexWriteFailureException(org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException) HTableInterfaceReference(org.apache.phoenix.hbase.index.table.HTableInterfaceReference) MultiIndexWriteFailureException(org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IndexWriteException(org.apache.phoenix.hbase.index.exception.IndexWriteException) MultiIndexWriteFailureException(org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException) SQLException(java.sql.SQLException) IOException(java.io.IOException) SingleIndexWriteFailureException(org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException) HashSet(java.util.HashSet)
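
The HashSet exists to deduplicate: the failed-tables list may name the same index more than once, and the PENDING_DISABLE counter should be bumped only once per index table. A standalone sketch of that step, with hypothetical names and the counter stubbed out as a print:

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DedupCounterSketch {

    // Stub for PhoenixIndexFailurePolicy's counter update.
    static void incrementCounterForIndex(String indexTable) {
        System.out.println("increment PENDING_DISABLE counter for " + indexTable);
    }

    public static void main(String[] args) {
        List<String> failedIndexes = Arrays.asList("IDX_A", "IDX_B", "IDX_A");
        Set<String> indexesToUpdate = new HashSet<>();
        for (String failedIndex : failedIndexes) {
            // Set.add returns false for duplicates, so each index is counted once;
            // the original's contains()-then-add() pair does the same job.
            if (indexesToUpdate.add(failedIndex)) {
                incrementCounterForIndex(failedIndex);
            }
        }
    }
}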

Aggregations

HTableInterfaceReference (org.apache.phoenix.hbase.index.table.HTableInterfaceReference): 22
Mutation (org.apache.hadoop.hbase.client.Mutation): 20
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 13
Collection (java.util.Collection): 9
Entry (java.util.Map.Entry): 9
IOException (java.io.IOException): 8
ArrayList (java.util.ArrayList): 8
SingleIndexWriteFailureException (org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException): 8
Put (org.apache.hadoop.hbase.client.Put): 6
Table (org.apache.hadoop.hbase.client.Table): 6
MultiIndexWriteFailureException (org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException): 6
List (java.util.List): 5
RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment): 5
Pair (org.apache.hadoop.hbase.util.Pair): 5
Test (org.junit.Test): 5
HashMap (java.util.HashMap): 4
ExecutionException (java.util.concurrent.ExecutionException): 4
EarlyExitFailure (org.apache.phoenix.hbase.index.parallel.EarlyExitFailure): 4
TaskBatch (org.apache.phoenix.hbase.index.parallel.TaskBatch): 4
SQLException (java.sql.SQLException): 3