Search in sources:

Example 6 with HTableInterfaceReference

Use of org.apache.phoenix.hbase.index.table.HTableInterfaceReference in project phoenix by apache.

The class PhoenixIndexFailurePolicy, method handleFailureWithExceptions.

private long handleFailureWithExceptions(Multimap<HTableInterfaceReference, Mutation> attempted, Exception cause) throws Throwable {
    Set<HTableInterfaceReference> refs = attempted.asMap().keySet();
    Map<String, Long> indexTableNames = new HashMap<String, Long>(refs.size());
    // start by looking at all the tables to which we attempted to write
    long timestamp = 0;
    boolean leaveIndexActive = blockDataTableWritesOnFailure || !disableIndexOnFailure;
    for (HTableInterfaceReference ref : refs) {
        long minTimeStamp = 0;
        // get the minimum timestamp across all the mutations we attempted on that table
        // FIXME: all cell timestamps should be the same
        Collection<Mutation> mutations = attempted.get(ref);
        if (mutations != null) {
            for (Mutation m : mutations) {
                for (List<Cell> kvs : m.getFamilyCellMap().values()) {
                    for (Cell kv : kvs) {
                        if (minTimeStamp == 0 || (kv.getTimestamp() >= 0 && minTimeStamp > kv.getTimestamp())) {
                            minTimeStamp = kv.getTimestamp();
                        }
                    }
                }
            }
        }
        timestamp = minTimeStamp;
        // If the data table has local index column families then get local indexes to disable.
        if (ref.getTableName().equals(env.getRegion().getTableDesc().getNameAsString()) && MetaDataUtil.hasLocalIndexColumnFamily(env.getRegion().getTableDesc())) {
            for (String tableName : getLocalIndexNames(ref, mutations)) {
                indexTableNames.put(tableName, minTimeStamp);
            }
        } else {
            indexTableNames.put(ref.getTableName(), minTimeStamp);
        }
    }
    // Nothing to do if we're not disabling the index and not rebuilding on failure
    if (!disableIndexOnFailure && !rebuildIndexOnFailure) {
        return timestamp;
    }
    PIndexState newState = disableIndexOnFailure ? PIndexState.DISABLE : PIndexState.ACTIVE;
    // For all the index tables we've found, try to disable them (or update their
    // disable timestamp); failures are handled below according to the policy flags.
    for (Map.Entry<String, Long> tableTimeElement : indexTableNames.entrySet()) {
        String indexTableName = tableTimeElement.getKey();
        long minTimeStamp = tableTimeElement.getValue();
        // If we're neither disabling the index nor blocking data table writes,
        // negate the value: the sign of INDEX_DISABLE_TIMESTAMP is used downstream
        // to differentiate the leave-index-active case.
        if (!disableIndexOnFailure && !blockDataTableWritesOnFailure) {
            minTimeStamp *= -1;
        }
        // Disable the index by using the updateIndexState method of the MetaDataProtocol endpoint coprocessor.
        HTableInterface systemTable = env.getTable(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()));
        MetaDataMutationResult result = IndexUtil.setIndexDisableTimeStamp(indexTableName, minTimeStamp, systemTable, newState);
        if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) {
            LOG.info("Index " + indexTableName + " has been dropped. Ignore uncommitted mutations");
            continue;
        }
        if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
            if (leaveIndexActive) {
                LOG.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = " + result.getMutationCode());
                // will lead to the RS being shutdown.
                if (blockDataTableWritesOnFailure) {
                    throw new DoNotRetryIOException("Attempt to update INDEX_DISABLE_TIMESTAMP failed.");
                }
            } else {
                LOG.warn("Attempt to disable index " + indexTableName + " failed with code = " + result.getMutationCode() + ". Will use default failure policy instead.");
                throw new DoNotRetryIOException("Attempt to disable " + indexTableName + " failed.");
            }
        }
        if (leaveIndexActive)
            LOG.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName + " due to an exception while writing updates.", cause);
        else
            LOG.info("Successfully disabled index " + indexTableName + " due to an exception while writing updates.", cause);
    }
    // Return the cell time stamp (note they should all be the same)
    return timestamp;
}
Also used: HashMap (java.util.HashMap), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), PIndexState (org.apache.phoenix.schema.PIndexState), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), HTableInterfaceReference (org.apache.phoenix.hbase.index.table.HTableInterfaceReference), Mutation (org.apache.hadoop.hbase.client.Mutation), Cell (org.apache.hadoop.hbase.Cell), Map (java.util.Map), MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
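
The timestamp scan at the top of handleFailureWithExceptions is self-contained; here is a minimal standalone sketch of the same logic (the class and method names are ours for illustration, not part of Phoenix):

import java.util.Collection;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Mutation;

final class MinTimestampSketch {
    // Returns the smallest non-negative cell timestamp across the attempted
    // mutations, or 0 if no cells are present - the same guard the policy uses.
    static long minCellTimestamp(Collection<Mutation> mutations) {
        long min = 0;
        for (Mutation m : mutations) {
            for (List<Cell> cells : m.getFamilyCellMap().values()) {
                for (Cell cell : cells) {
                    long ts = cell.getTimestamp();
                    // the first cell seeds the minimum; afterwards keep the smallest non-negative value
                    if (min == 0 || (ts >= 0 && ts < min)) {
                        min = ts;
                    }
                }
            }
        }
        return min;
    }
}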

Example 7 with HTableInterfaceReference

Use of org.apache.phoenix.hbase.index.table.HTableInterfaceReference in project phoenix by apache.

The class TestPerRegionIndexWriteCache, method testMultipleRegions.

@Test
public void testMultipleRegions() {
    PerRegionIndexWriteCache cache = new PerRegionIndexWriteCache();
    HTableInterfaceReference t1 = new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes("t1")));
    List<Mutation> mutations = Lists.<Mutation>newArrayList(p);
    List<Mutation> m2 = Lists.<Mutation>newArrayList(p2);
    // add each region
    cache.addEdits(r1, t1, mutations);
    cache.addEdits(r2, t1, m2);
    // check region1
    Multimap<HTableInterfaceReference, Mutation> edits = cache.getEdits(r1);
    Set<Entry<HTableInterfaceReference, Collection<Mutation>>> entries = edits.asMap().entrySet();
    assertEquals("Got more than one table in the the edit map!", 1, entries.size());
    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
        // ensure that we are still storing a list here - otherwise it breaks the parallel writer
        // implementation
        final List<Mutation> stored = (List<Mutation>) entry.getValue();
        assertEquals("Got an unexpected amount of mutations in the entry for region1", 1, stored.size());
        assertEquals("Got an unexpected mutation in the entry for region2", p, stored.get(0));
    }
    // check region2
    edits = cache.getEdits(r2);
    entries = edits.asMap().entrySet();
    assertEquals("Got more than one table in the the edit map!", 1, entries.size());
    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
        // ensure that we are still storing a list here - otherwise it breaks the parallel writer
        // implementation
        final List<Mutation> stored = (List<Mutation>) entry.getValue();
        assertEquals("Got an unexpected amount of mutations in the entry for region2", 1, stored.size());
        assertEquals("Got an unexpected mutation in the entry for region2", p2, stored.get(0));
    }
    // ensure that a second get doesn't have any more edits. This ensures that we don't keep
    // references around to these edits and have a memory leak
    assertNull("Got an entry for a region we removed", cache.getEdits(r1));
}
Also used: Entry (java.util.Map.Entry), HTableInterfaceReference (org.apache.phoenix.hbase.index.table.HTableInterfaceReference), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), Collection (java.util.Collection), ArrayList (java.util.ArrayList), List (java.util.List), Mutation (org.apache.hadoop.hbase.client.Mutation), Test (org.junit.Test)
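
The test's own comments pin down two contract points: edits must be stored as Lists (the parallel writer depends on it), and a second getEdits for the same region must return null so no references leak. A hypothetical drain-on-get cache satisfying that contract, assuming Guava's ArrayListMultimap (our illustration, not PerRegionIndexWriteCache's actual source):

import java.util.HashMap;
import java.util.Map;

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

final class DrainOnGetCache<R, T, M> {
    private final Map<R, Multimap<T, M>> cache = new HashMap<>();

    // Appends edits for a table under the given region; ArrayListMultimap keeps
    // list-valued entries, which is what the casts in the tests rely on.
    public synchronized void addEdits(R region, T table, Iterable<M> edits) {
        Multimap<T, M> perRegion = cache.get(region);
        if (perRegion == null) {
            perRegion = ArrayListMultimap.create();
            cache.put(region, perRegion);
        }
        for (M edit : edits) {
            perRegion.put(table, edit);
        }
    }

    // Hands back the region's edits and removes them in one step, so a second
    // call returns null - the behavior the assertNull checks above verify.
    public synchronized Multimap<T, M> getEdits(R region) {
        return cache.remove(region);
    }
}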

Example 8 with HTableInterfaceReference

Use of org.apache.phoenix.hbase.index.table.HTableInterfaceReference in project phoenix by apache.

The class TestPerRegionIndexWriteCache, method testAddRemoveSingleRegion.

@Test
public void testAddRemoveSingleRegion() {
    PerRegionIndexWriteCache cache = new PerRegionIndexWriteCache();
    HTableInterfaceReference t1 = new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes("t1")));
    List<Mutation> mutations = new ArrayList<Mutation>();
    mutations.add(p);
    cache.addEdits(r1, t1, mutations);
    Multimap<HTableInterfaceReference, Mutation> edits = cache.getEdits(r1);
    Set<Entry<HTableInterfaceReference, Collection<Mutation>>> entries = edits.asMap().entrySet();
    assertEquals("Got more than one table in the the edit map!", 1, entries.size());
    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
        // ensure that we are still storing a list here - otherwise it breaks the parallel writer implementation
        final List<Mutation> stored = (List<Mutation>) entry.getValue();
        assertEquals("Got an unexpected amount of mutations in the entry", 1, stored.size());
        assertEquals("Got an unexpected mutation in the entry", p, stored.get(0));
    }
    // ensure that a second get doesn't have any more edits. This ensures that we don't keep
    // references around to these edits and have a memory leak
    assertNull("Got an entry for a region we removed", cache.getEdits(r1));
}
Also used: Entry (java.util.Map.Entry), HTableInterfaceReference (org.apache.phoenix.hbase.index.table.HTableInterfaceReference), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), ArrayList (java.util.ArrayList), Collection (java.util.Collection), List (java.util.List), Mutation (org.apache.hadoop.hbase.client.Mutation), Test (org.junit.Test)

Example 9 with HTableInterfaceReference

Use of org.apache.phoenix.hbase.index.table.HTableInterfaceReference in project phoenix by apache.

The class TestPerRegionIndexWriteCache, method testMultipleAddsForSingleRegion.

@Test
public void testMultipleAddsForSingleRegion() {
    PerRegionIndexWriteCache cache = new PerRegionIndexWriteCache();
    HTableInterfaceReference t1 = new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes("t1")));
    List<Mutation> mutations = Lists.<Mutation>newArrayList(p);
    cache.addEdits(r1, t1, mutations);
    // add a second set
    mutations = Lists.<Mutation>newArrayList(p2);
    cache.addEdits(r1, t1, mutations);
    Multimap<HTableInterfaceReference, Mutation> edits = cache.getEdits(r1);
    Set<Entry<HTableInterfaceReference, Collection<Mutation>>> entries = edits.asMap().entrySet();
    assertEquals("Got more than one table in the the edit map!", 1, entries.size());
    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
        // ensure that we are still storing a list here - otherwise it breaks the parallel writer
        // implementation
        final List<Mutation> stored = (List<Mutation>) entry.getValue();
        assertEquals("Got an unexpected amount of mutations in the entry", 2, stored.size());
        assertEquals("Got an unexpected mutation in the entry", p, stored.get(0));
        assertEquals("Got an unexpected mutation in the entry", p2, stored.get(1));
    }
}
Also used: Entry (java.util.Map.Entry), HTableInterfaceReference (org.apache.phoenix.hbase.index.table.HTableInterfaceReference), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), Collection (java.util.Collection), ArrayList (java.util.ArrayList), List (java.util.List), Mutation (org.apache.hadoop.hbase.client.Mutation), Test (org.junit.Test)
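
The point this test pins down is accumulation: a second addEdits for the same region and table appends rather than replaces. With a list-backed multimap that behavior falls out naturally, as this tiny Guava illustration (ours, not Phoenix code) shows:

import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;

Multimap<String, String> edits = ArrayListMultimap.create();
edits.put("t1", "first");
edits.put("t1", "second");
// edits.get("t1") is now ["first", "second"], preserving insertion order -
// which is why stored.get(0) is p and stored.get(1) is p2 above.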

Example 10 with HTableInterfaceReference

Use of org.apache.phoenix.hbase.index.table.HTableInterfaceReference in project phoenix by apache.

The class TestParalleWriterIndexCommitter, method testSynchronouslyCompletesAllWrites.

@SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testSynchronouslyCompletesAllWrites() throws Exception {
    LOG.info("Starting " + test.getTableNameString());
    LOG.info("Current thread is interrupted: " + Thread.interrupted());
    Abortable abort = new StubAbortable();
    RegionCoprocessorEnvironment e = Mockito.mock(RegionCoprocessorEnvironment.class);
    Configuration conf = new Configuration();
    Mockito.when(e.getConfiguration()).thenReturn(conf);
    Mockito.when(e.getSharedData()).thenReturn(new ConcurrentHashMap<String, Object>());
    Stoppable stop = Mockito.mock(Stoppable.class);
    ExecutorService exec = Executors.newFixedThreadPool(1);
    Map<ImmutableBytesPtr, HTableInterface> tables = new HashMap<ImmutableBytesPtr, HTableInterface>();
    FakeTableFactory factory = new FakeTableFactory(tables);
    ImmutableBytesPtr tableName = new ImmutableBytesPtr(this.test.getTableName());
    Put m = new Put(row);
    m.add(Bytes.toBytes("family"), Bytes.toBytes("qual"), null);
    Multimap<HTableInterfaceReference, Mutation> indexUpdates = ArrayListMultimap.<HTableInterfaceReference, Mutation>create();
    indexUpdates.put(new HTableInterfaceReference(tableName), m);
    HTableInterface table = Mockito.mock(HTableInterface.class);
    final boolean[] completed = new boolean[] { false };
    Mockito.when(table.batch(Mockito.anyList())).thenAnswer(new Answer<Void>() {

        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            // just keep track that it was called
            completed[0] = true;
            return null;
        }
    });
    Mockito.when(table.getTableName()).thenReturn(test.getTableName());
    // add the table to the set of tables, so its returned to the writer
    tables.put(tableName, table);
    // setup the writer and failure policy
    ParallelWriterIndexCommitter writer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
    writer.setup(factory, exec, abort, stop, 1, e);
    writer.write(indexUpdates, true);
    assertTrue("Writer returned before the table batch completed! Likely a race condition tripped", completed[0]);
    writer.stop(this.test.getTableNameString() + " finished");
    assertTrue("Factory didn't get shutdown after writer#stop!", factory.shutdown);
    assertTrue("ExectorService isn't terminated after writer#stop!", exec.isShutdown());
}
Also used: StubAbortable (org.apache.phoenix.hbase.index.StubAbortable), Configuration (org.apache.hadoop.conf.Configuration), HashMap (java.util.HashMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment), Abortable (org.apache.hadoop.hbase.Abortable), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), Stoppable (org.apache.hadoop.hbase.Stoppable), Put (org.apache.hadoop.hbase.client.Put), InvocationOnMock (org.mockito.invocation.InvocationOnMock), HTableInterfaceReference (org.apache.phoenix.hbase.index.table.HTableInterfaceReference), ExecutorService (java.util.concurrent.ExecutorService), Mutation (org.apache.hadoop.hbase.client.Mutation), Test (org.junit.Test)
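
Stripped of the Mockito plumbing, the writer lifecycle under test reduces to three calls. A condensed sketch reusing the variables defined in the test above; the comments state the behavior the assertions verify:

ParallelWriterIndexCommitter writer = new ParallelWriterIndexCommitter(VersionInfo.getVersion());
// wire in the table factory, thread pool, abortable/stoppable hooks, and environment
writer.setup(factory, exec, abort, stop, 1, e);
// returns only once every per-table batch has completed (hence the completed[0] check)
writer.write(indexUpdates, true);
// shuts down the table factory and the executor service
writer.stop("done");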

Aggregations

Mutation (org.apache.hadoop.hbase.client.Mutation): 10 uses
HTableInterfaceReference (org.apache.phoenix.hbase.index.table.HTableInterfaceReference): 10 uses
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 7 uses
Collection (java.util.Collection): 5 uses
HashMap (java.util.HashMap): 5 uses
Entry (java.util.Map.Entry): 5 uses
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 5 uses
Test (org.junit.Test): 5 uses
ArrayList (java.util.ArrayList): 4 uses
List (java.util.List): 4 uses
RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment): 3 uses
IOException (java.io.IOException): 2 uses
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 2 uses
ExecutionException (java.util.concurrent.ExecutionException): 2 uses
ExecutorService (java.util.concurrent.ExecutorService): 2 uses
Configuration (org.apache.hadoop.conf.Configuration): 2 uses
Abortable (org.apache.hadoop.hbase.Abortable): 2 uses
Stoppable (org.apache.hadoop.hbase.Stoppable): 2 uses
Put (org.apache.hadoop.hbase.client.Put): 2 uses
StubAbortable (org.apache.phoenix.hbase.index.StubAbortable): 2 uses