Search in sources:

Example 21 with HTableInterfaceReference

Use of org.apache.phoenix.hbase.index.table.HTableInterfaceReference in project phoenix by apache.

From the class PhoenixIndexFailurePolicy, the method handleFailureWithExceptions.

private long handleFailureWithExceptions(Multimap<HTableInterfaceReference, Mutation> attempted, final Exception cause) throws Throwable {
    Set<HTableInterfaceReference> refs = attempted.asMap().keySet();
    final Map<String, Long> indexTableNames = new HashMap<String, Long>(refs.size());
    // start by looking at all the tables to which we attempted to write
    long timestamp = 0;
    final boolean leaveIndexActive = blockDataTableWritesOnFailure || !disableIndexOnFailure;
    // if using TrackingParallelWriter, we know which indexes failed and only disable those
    Set<HTableInterfaceReference> failedTables = cause instanceof MultiIndexWriteFailureException ? new HashSet<HTableInterfaceReference>(((MultiIndexWriteFailureException) cause).getFailedTables()) : Collections.<HTableInterfaceReference>emptySet();
    for (HTableInterfaceReference ref : refs) {
        if (failedTables.size() > 0 && !failedTables.contains(ref)) {
            // leave index active if its writes succeeded
            continue;
        }
        long minTimeStamp = 0;
        // get the minimum timestamp across all the mutations we attempted on that table
        // FIXME: all cell timestamps should be the same
        Collection<Mutation> mutations = attempted.get(ref);
        if (mutations != null) {
            for (Mutation m : mutations) {
                for (List<Cell> kvs : m.getFamilyCellMap().values()) {
                    for (Cell kv : kvs) {
                        if (minTimeStamp == 0 || (kv.getTimestamp() >= 0 && minTimeStamp > kv.getTimestamp())) {
                            minTimeStamp = kv.getTimestamp();
                        }
                    }
                }
            }
        }
        timestamp = minTimeStamp;
        // If the data table has local index column families then get local indexes to disable.
        if (ref.getTableName().equals(env.getRegion().getTableDescriptor().getTableName().getNameAsString()) && MetaDataUtil.hasLocalIndexColumnFamily(env.getRegion().getTableDescriptor())) {
            for (String tableName : getLocalIndexNames(ref, mutations)) {
                indexTableNames.put(tableName, minTimeStamp);
            }
            // client disables the index, so we pass the index names in the thrown exception
            if (cause instanceof MultiIndexWriteFailureException) {
                List<HTableInterfaceReference> failedLocalIndexes = Lists.newArrayList(Iterables.transform(indexTableNames.entrySet(), new Function<Map.Entry<String, Long>, HTableInterfaceReference>() {

                    @Override
                    public HTableInterfaceReference apply(Entry<String, Long> input) {
                        return new HTableInterfaceReference(new ImmutableBytesPtr(Bytes.toBytes(input.getKey())));
                    }
                }));
                ((MultiIndexWriteFailureException) cause).setFailedTables(failedLocalIndexes);
            }
        } else {
            indexTableNames.put(ref.getTableName(), minTimeStamp);
        }
    }
    // Nothing to do if we're not disabling the index and not rebuilding on failure
    if (!disableIndexOnFailure && !rebuildIndexOnFailure) {
        return timestamp;
    }
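    // Target state for the metadata update below: PENDING_DISABLE when we disable the index on
    // failure, otherwise PENDING_ACTIVE so the index stays active while it is caught up.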
    final PIndexState newState = disableIndexOnFailure ? PIndexState.PENDING_DISABLE : PIndexState.PENDING_ACTIVE;
    final long fTimestamp = timestamp;
    // for all the index tables that we've found, try to update their state; if that fails, throw so
    // the default failure policy is used instead
    return User.runAsLoginUser(new PrivilegedExceptionAction<Long>() {

        @Override
        public Long run() throws Exception {
            for (Map.Entry<String, Long> tableTimeElement : indexTableNames.entrySet()) {
                String indexTableName = tableTimeElement.getKey();
                long minTimeStamp = tableTimeElement.getValue();
                // If the index is left active and data table writes are not blocked, negate the
                // time stamp; the sign of the stored time stamp differentiates the two cases.
                if (!disableIndexOnFailure && !blockDataTableWritesOnFailure) {
                    minTimeStamp *= -1;
                }
                // Disable the index by using the updateIndexState method of MetaDataProtocol end point coprocessor.
                try (Table systemTable = env.getConnection().getTable(SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()))) {
                    MetaDataMutationResult result = IndexUtil.updateIndexState(indexTableName, minTimeStamp, systemTable, newState);
                    if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) {
                        LOGGER.info("Index " + indexTableName + " has been dropped. Ignore uncommitted mutations");
                        continue;
                    }
                    if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                        if (leaveIndexActive) {
                            LOGGER.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = " + result.getMutationCode());
                            // throwing from here will lead to the RS being shutdown, so only throw
                            // when data table writes must be blocked
                            if (blockDataTableWritesOnFailure) {
                                throw new DoNotRetryIOException("Attempt to update INDEX_DISABLE_TIMESTAMP failed.");
                            }
                        } else {
                            LOGGER.warn("Attempt to disable index " + indexTableName + " failed with code = " + result.getMutationCode() + ". Will use default failure policy instead.");
                            throw new DoNotRetryIOException("Attempt to disable " + indexTableName + " failed.");
                        }
                    }
                    LOGGER.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName + " due to an exception while" + " writing updates. indexState=" + newState, cause);
                } catch (Throwable t) {
                    if (t instanceof Exception) {
                        throw (Exception) t;
                    } else {
                        throw new Exception(t);
                    }
                }
            }
            // Return the cell time stamp (note they should all be the same)
            return fTimestamp;
        }
    });
}
Also used : HashMap(java.util.HashMap) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Function(org.apache.phoenix.thirdparty.com.google.common.base.Function) Entry(java.util.Map.Entry) Cell(org.apache.hadoop.hbase.Cell) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) PTable(org.apache.phoenix.schema.PTable) Table(org.apache.hadoop.hbase.client.Table) PIndexState(org.apache.phoenix.schema.PIndexState) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) MultiIndexWriteFailureException(org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException) IndexWriteException(org.apache.phoenix.hbase.index.exception.IndexWriteException) SQLException(java.sql.SQLException) IOException(java.io.IOException) SingleIndexWriteFailureException(org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException) HTableInterfaceReference(org.apache.phoenix.hbase.index.table.HTableInterfaceReference) Mutation(org.apache.hadoop.hbase.client.Mutation) Map(java.util.Map)
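
The timestamp scan at the top of handleFailureWithExceptions drives everything that follows, so a minimal, self-contained sketch of just that step may help. The class name MinTimestampSketch and the sample rows are made up for illustration; only hbase-client is assumed on the classpath, and the loop mirrors the one in Example 21.

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class MinTimestampSketch {

    // Same scan as Example 21: the smallest cell timestamp across all attempted
    // mutations, or 0 when there are no cells at all.
    static long minCellTimestamp(Collection<Mutation> mutations) {
        long minTimeStamp = 0;
        for (Mutation m : mutations) {
            for (List<Cell> kvs : m.getFamilyCellMap().values()) {
                for (Cell kv : kvs) {
                    if (minTimeStamp == 0 || (kv.getTimestamp() >= 0 && minTimeStamp > kv.getTimestamp())) {
                        minTimeStamp = kv.getTimestamp();
                    }
                }
            }
        }
        return minTimeStamp;
    }

    public static void main(String[] args) {
        byte[] cf = Bytes.toBytes("0");
        Put p1 = new Put(Bytes.toBytes("row1"));
        p1.addColumn(cf, Bytes.toBytes("q"), 2000L, Bytes.toBytes("v1"));
        Put p2 = new Put(Bytes.toBytes("row2"));
        p2.addColumn(cf, Bytes.toBytes("q"), 1000L, Bytes.toBytes("v2"));
        // Prints 1000, the earliest timestamp among the attempted writes; this is the value
        // the failure policy records for the affected index table.
        System.out.println(minCellTimestamp(Arrays.<Mutation>asList(p1, p2)));
    }
}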

Example 22 with HTableInterfaceReference

Use of org.apache.phoenix.hbase.index.table.HTableInterfaceReference in project phoenix by apache.

From the class IndexWriter, the method resolveTableReferences.

/**
 * Convert the passed index updates to {@link HTableInterfaceReference}s.
 * @param indexUpdates from the index builder
 * @return pairs that can then be written by an {@link IndexWriter}.
 */
protected Multimap<HTableInterfaceReference, Mutation> resolveTableReferences(Collection<Pair<Mutation, byte[]>> indexUpdates) {
    Multimap<HTableInterfaceReference, Mutation> updates = ArrayListMultimap.<HTableInterfaceReference, Mutation>create();
    // simple map to make lookups easy while we build the map of tables to create
    Map<ImmutableBytesPtr, HTableInterfaceReference> tables = new HashMap<ImmutableBytesPtr, HTableInterfaceReference>(updates.size());
    for (Pair<Mutation, byte[]> entry : indexUpdates) {
        byte[] tableName = entry.getSecond();
        ImmutableBytesPtr ptr = new ImmutableBytesPtr(tableName);
        HTableInterfaceReference table = tables.get(ptr);
        if (table == null) {
            table = new HTableInterfaceReference(ptr);
            tables.put(ptr, table);
        }
        updates.put(table, entry.getFirst());
    }
    return updates;
}
Also used : HashMap(java.util.HashMap) HTableInterfaceReference(org.apache.phoenix.hbase.index.table.HTableInterfaceReference) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Mutation(org.apache.hadoop.hbase.client.Mutation)
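
As a point of comparison, here is a minimal stand-alone sketch of the same grouping idiom using plain JDK collections instead of the Multimap. The class GroupByTableSketch and the index table name are invented for illustration; the sketch assumes, as the failedTables.contains(ref) check in Example 21 already does, that HTableInterfaceReference instances for the same table name compare equal.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.hbase.index.table.HTableInterfaceReference;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

public class GroupByTableSketch {

    // Same shape as resolveTableReferences, but backed by a HashMap of lists rather than a Multimap.
    static Map<HTableInterfaceReference, List<Mutation>> group(Collection<Pair<Mutation, byte[]>> indexUpdates) {
        Map<HTableInterfaceReference, List<Mutation>> updates = new HashMap<>();
        for (Pair<Mutation, byte[]> entry : indexUpdates) {
            // The byte[] half of each pair is the physical index table name.
            HTableInterfaceReference table =
                    new HTableInterfaceReference(new ImmutableBytesPtr(entry.getSecond()));
            updates.computeIfAbsent(table, k -> new ArrayList<>()).add(entry.getFirst());
        }
        return updates;
    }

    public static void main(String[] args) {
        byte[] idx = Bytes.toBytes("MY_SCHEMA.IDX_A");
        Put p1 = new Put(Bytes.toBytes("k1"));
        Put p2 = new Put(Bytes.toBytes("k2"));
        Map<HTableInterfaceReference, List<Mutation>> grouped =
                group(Arrays.asList(new Pair<Mutation, byte[]>(p1, idx), new Pair<Mutation, byte[]>(p2, idx)));
        // Both mutations land under a single reference for IDX_A.
        grouped.forEach((ref, muts) -> System.out.println(ref.getTableName() + " -> " + muts.size()));
    }
}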

Aggregations

HTableInterfaceReference (org.apache.phoenix.hbase.index.table.HTableInterfaceReference): 22 usages
Mutation (org.apache.hadoop.hbase.client.Mutation): 20 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 13 usages
Collection (java.util.Collection): 9 usages
Entry (java.util.Map.Entry): 9 usages
IOException (java.io.IOException): 8 usages
ArrayList (java.util.ArrayList): 8 usages
SingleIndexWriteFailureException (org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException): 8 usages
Put (org.apache.hadoop.hbase.client.Put): 6 usages
Table (org.apache.hadoop.hbase.client.Table): 6 usages
MultiIndexWriteFailureException (org.apache.phoenix.hbase.index.exception.MultiIndexWriteFailureException): 6 usages
List (java.util.List): 5 usages
RegionCoprocessorEnvironment (org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment): 5 usages
Pair (org.apache.hadoop.hbase.util.Pair): 5 usages
Test (org.junit.Test): 5 usages
HashMap (java.util.HashMap): 4 usages
ExecutionException (java.util.concurrent.ExecutionException): 4 usages
EarlyExitFailure (org.apache.phoenix.hbase.index.parallel.EarlyExitFailure): 4 usages
TaskBatch (org.apache.phoenix.hbase.index.parallel.TaskBatch): 4 usages
SQLException (java.sql.SQLException): 3 usages