
Example 96 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

In the class ConnectionQueryServicesImpl, the method addColumnQualifierColumn:

// Special method for adding the column qualifier column for 4.10. 
private PhoenixConnection addColumnQualifierColumn(PhoenixConnection oldMetaConnection, Long timestamp) throws SQLException {
    Properties props = PropertiesUtil.deepCopy(oldMetaConnection.getClientInfo());
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(timestamp));
    // Cannot go through DriverManager or you end up in an infinite loop because it'll call init again
    PhoenixConnection metaConnection = new PhoenixConnection(oldMetaConnection, this, props);
    PTable sysCatalogPTable = metaConnection.getTable(new PTableKey(null, PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME));
    int numColumns = sysCatalogPTable.getColumns().size();
    try (PreparedStatement mutateTable = metaConnection.prepareStatement(MetaDataClient.MUTATE_TABLE)) {
        mutateTable.setString(1, null);
        mutateTable.setString(2, SYSTEM_CATALOG_SCHEMA);
        mutateTable.setString(3, SYSTEM_CATALOG_TABLE);
        mutateTable.setString(4, PTableType.SYSTEM.getSerializedValue());
        mutateTable.setLong(5, sysCatalogPTable.getSequenceNumber() + 1);
        mutateTable.setInt(6, numColumns + 1);
        mutateTable.execute();
    }
    List<Mutation> tableMetadata = new ArrayList<>();
    tableMetadata.addAll(metaConnection.getMutationState().toMutations(metaConnection.getSCN()).next().getSecond());
    metaConnection.rollback();
    PColumn column = new PColumnImpl(PNameFactory.newName("COLUMN_QUALIFIER"), PNameFactory.newName(DEFAULT_COLUMN_FAMILY_NAME), PVarbinary.INSTANCE, null, null, true, numColumns, SortOrder.ASC, null, null, false, null, false, false, Bytes.toBytes("COLUMN_QUALIFIER"));
    String upsertColumnMetadata = "UPSERT INTO " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + "\"( " + TENANT_ID + "," + TABLE_SCHEM + "," + TABLE_NAME + "," + COLUMN_NAME + "," + COLUMN_FAMILY + "," + DATA_TYPE + "," + NULLABLE + "," + COLUMN_SIZE + "," + DECIMAL_DIGITS + "," + ORDINAL_POSITION + "," + SORT_ORDER + "," + DATA_TABLE_NAME + "," + ARRAY_SIZE + "," + VIEW_CONSTANT + "," + IS_VIEW_REFERENCED + "," + PK_NAME + "," + KEY_SEQ + "," + COLUMN_DEF + "," + IS_ROW_TIMESTAMP + ") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
    try (PreparedStatement colUpsert = metaConnection.prepareStatement(upsertColumnMetadata)) {
        colUpsert.setString(1, null);
        colUpsert.setString(2, SYSTEM_CATALOG_SCHEMA);
        colUpsert.setString(3, SYSTEM_CATALOG_TABLE);
        colUpsert.setString(4, "COLUMN_QUALIFIER");
        colUpsert.setString(5, DEFAULT_COLUMN_FAMILY);
        colUpsert.setInt(6, column.getDataType().getSqlType());
        colUpsert.setInt(7, ResultSetMetaData.columnNullable);
        colUpsert.setNull(8, Types.INTEGER);
        colUpsert.setNull(9, Types.INTEGER);
        colUpsert.setInt(10, sysCatalogPTable.getBucketNum() != null ? numColumns : (numColumns + 1));
        colUpsert.setInt(11, SortOrder.ASC.getSystemValue());
        colUpsert.setString(12, null);
        colUpsert.setNull(13, Types.INTEGER);
        colUpsert.setBytes(14, null);
        colUpsert.setBoolean(15, false);
        colUpsert.setString(16, sysCatalogPTable.getPKName() == null ? null : sysCatalogPTable.getPKName().getString());
        colUpsert.setNull(17, Types.SMALLINT);
        colUpsert.setNull(18, Types.VARCHAR);
        colUpsert.setBoolean(19, false);
        colUpsert.execute();
    }
    tableMetadata.addAll(metaConnection.getMutationState().toMutations(metaConnection.getSCN()).next().getSecond());
    metaConnection.rollback();
    metaConnection.getQueryServices().addColumn(tableMetadata, sysCatalogPTable, Collections.<String, List<Pair<String, Object>>>emptyMap(), Collections.<String>emptySet(), Lists.newArrayList(column));
    metaConnection.removeTable(null, SYSTEM_CATALOG_NAME, null, timestamp);
    ConnectionQueryServicesImpl.this.removeTable(null, SYSTEM_CATALOG_NAME, null, timestamp);
    clearCache();
    return metaConnection;
}
Also used: PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection), PColumnImpl (org.apache.phoenix.schema.PColumnImpl), ArrayList (java.util.ArrayList), PreparedStatement (java.sql.PreparedStatement), Properties (java.util.Properties), PTable (org.apache.phoenix.schema.PTable), PTinyint (org.apache.phoenix.schema.types.PTinyint), PUnsignedTinyint (org.apache.phoenix.schema.types.PUnsignedTinyint), MultiRowMutationEndpoint (org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint), PColumn (org.apache.phoenix.schema.PColumn), Mutation (org.apache.hadoop.hbase.client.Mutation), PTableKey (org.apache.phoenix.schema.PTableKey), Pair (org.apache.hadoop.hbase.util.Pair)
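
The trick worth noting above is how metadata rows are written without going through the normal commit path: each UPSERT is executed, the pending HBase Mutations are harvested from the connection's MutationState, and the connection is then rolled back. A minimal sketch of that pattern, assuming the same PhoenixConnection APIs used in the example (the helper name collectMutations is hypothetical):

private static List<Mutation> collectMutations(PhoenixConnection conn, String upsertSql) throws SQLException {
    List<Mutation> collected = new ArrayList<>();
    try (PreparedStatement stmt = conn.prepareStatement(upsertSql)) {
        stmt.execute();
    }
    // toMutations(scn) iterates pairs of (table name, mutation list); the second
    // element holds the Put/Delete objects destined for HBase
    collected.addAll(conn.getMutationState().toMutations(conn.getSCN()).next().getSecond());
    // roll back so the captured mutations aren't also committed through the normal path
    conn.rollback();
    return collected;
}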

Example 97 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

In the class ConnectionlessQueryServicesImpl, the method getTableName:

private static byte[] getTableName(List<Mutation> tableMetaData, byte[] physicalTableName) {
    if (physicalTableName != null) {
        return physicalTableName;
    }
    byte[][] rowKeyMetadata = new byte[3][];
    Mutation m = MetaDataUtil.getTableHeaderRow(tableMetaData);
    byte[] key = m.getRow();
    SchemaUtil.getVarChars(key, rowKeyMetadata);
    byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
    byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
    return SchemaUtil.getTableNameAsBytes(schemaBytes, tableBytes);
}
Also used: Mutation (org.apache.hadoop.hbase.client.Mutation)
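
For context, here is a hedged illustration of the row-key layout getTableName relies on: SYSTEM.CATALOG header row keys pack tenant id, schema name, and table name as separator-byte-delimited VARCHAR components, which SchemaUtil.getVarChars splits back apart (the schema and table names below are made up):

// empty tenant id, then "MY_SCHEMA", then "MY_TABLE", each delimited by a separator byte
byte[] key = ByteUtil.concat(QueryConstants.SEPARATOR_BYTE_ARRAY,
        Bytes.toBytes("MY_SCHEMA"), QueryConstants.SEPARATOR_BYTE_ARRAY,
        Bytes.toBytes("MY_TABLE"));
byte[][] rowKeyMetadata = new byte[3][];
SchemaUtil.getVarChars(key, rowKeyMetadata);
// rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX] now holds "MY_SCHEMA"
// rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX] now holds "MY_TABLE"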

Example 98 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

In the class MetaDataClient, the method createFunction:

public MutationState createFunction(CreateFunctionStatement stmt) throws SQLException {
    boolean wasAutoCommit = connection.getAutoCommit();
    connection.rollback();
    try {
        PFunction function = new PFunction(stmt.getFunctionInfo(), stmt.isTemporary(), stmt.isReplace());
        connection.setAutoCommit(false);
        String tenantIdStr = connection.getTenantId() == null ? null : connection.getTenantId().getString();
        List<Mutation> functionData = Lists.newArrayListWithExpectedSize(function.getFunctionArguments().size() + 1);
        List<FunctionArgument> args = function.getFunctionArguments();
        try (PreparedStatement argUpsert = connection.prepareStatement(INSERT_FUNCTION_ARGUMENT)) {
            for (int i = 0; i < args.size(); i++) {
                FunctionArgument arg = args.get(i);
                addFunctionArgMutation(function.getFunctionName(), arg, argUpsert, i);
            }
            functionData.addAll(connection.getMutationState().toMutations().next().getSecond());
            connection.rollback();
        }
        try (PreparedStatement functionUpsert = connection.prepareStatement(CREATE_FUNCTION)) {
            functionUpsert.setString(1, tenantIdStr);
            functionUpsert.setString(2, function.getFunctionName());
            functionUpsert.setInt(3, function.getFunctionArguments().size());
            functionUpsert.setString(4, function.getClassName());
            functionUpsert.setString(5, function.getJarPath());
            functionUpsert.setString(6, function.getReturnType());
            functionUpsert.execute();
            functionData.addAll(connection.getMutationState().toMutations(null).next().getSecond());
            connection.rollback();
        }
        MetaDataMutationResult result = connection.getQueryServices().createFunction(functionData, function, stmt.isTemporary());
        MutationCode code = result.getMutationCode();
        switch(code) {
            case FUNCTION_ALREADY_EXISTS:
                if (!function.isReplace()) {
                    throw new FunctionAlreadyExistsException(function.getFunctionName(), result.getFunctions().get(0));
                } else {
                    connection.removeFunction(function.getTenantId(), function.getFunctionName(), result.getMutationTime());
                    addFunctionToCache(result);
                }
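                // (no break here: after a successful replace, control falls through
                // into the NEWER_FUNCTION_FOUND case below)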
            case NEWER_FUNCTION_FOUND:
                // Add the function to the ConnectionQueryServices cache, but don't add
                // it to this connection as we can't see it.
                throw new NewerFunctionAlreadyExistsException(function.getFunctionName(), result.getFunctions().get(0));
            default:
                List<PFunction> functions = new ArrayList<PFunction>(1);
                functions.add(function);
                result = new MetaDataMutationResult(code, result.getMutationTime(), functions, true);
                if (function.isReplace()) {
                    connection.removeFunction(function.getTenantId(), function.getFunctionName(), result.getMutationTime());
                }
                addFunctionToCache(result);
        }
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
    return new MutationState(1, 1000, connection);
}
Also used: PFunction (org.apache.phoenix.parse.PFunction), ArrayList (java.util.ArrayList), PreparedStatement (java.sql.PreparedStatement), IndexKeyConstraint (org.apache.phoenix.parse.IndexKeyConstraint), PrimaryKeyConstraint (org.apache.phoenix.parse.PrimaryKeyConstraint), ColumnDefInPkConstraint (org.apache.phoenix.parse.ColumnDefInPkConstraint), MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode), MutationState (org.apache.phoenix.execute.MutationState), Mutation (org.apache.hadoop.hbase.client.Mutation), FunctionArgument (org.apache.phoenix.parse.PFunction.FunctionArgument), MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult)
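
As a usage sketch, createFunction is what ultimately runs when a client issues a CREATE FUNCTION statement through the Phoenix JDBC driver; the JDBC URL, UDF class name, and jar path below are hypothetical:

try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
    // this DDL is parsed into a CreateFunctionStatement and handled by
    // MetaDataClient.createFunction on the client side
    conn.createStatement().execute(
        "CREATE FUNCTION my_reverse(varchar) RETURNS varchar" +
        " AS 'com.example.udf.MyReverse' USING JAR 'hdfs:/udfs/my-udf.jar'");
}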

Example 99 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

In the class IndexWriter, the method resolveTableReferences:

/**
   * Convert the passed index updates to {@link HTableInterfaceReference}s.
   * @param indexUpdates from the index builder
   * @return a multimap from each referenced index table to the mutations that can then be written by an {@link IndexWriter}.
   */
protected Multimap<HTableInterfaceReference, Mutation> resolveTableReferences(Collection<Pair<Mutation, byte[]>> indexUpdates) {
    Multimap<HTableInterfaceReference, Mutation> updates = ArrayListMultimap.<HTableInterfaceReference, Mutation>create();
    // simple map to make lookups easy while we build the map of tables to create
    Map<ImmutableBytesPtr, HTableInterfaceReference> tables = new HashMap<ImmutableBytesPtr, HTableInterfaceReference>(updates.size());
    for (Pair<Mutation, byte[]> entry : indexUpdates) {
        byte[] tableName = entry.getSecond();
        ImmutableBytesPtr ptr = new ImmutableBytesPtr(tableName);
        HTableInterfaceReference table = tables.get(ptr);
        if (table == null) {
            table = new HTableInterfaceReference(ptr);
            tables.put(ptr, table);
        }
        updates.put(table, entry.getFirst());
    }
    return updates;
}
Also used: HashMap (java.util.HashMap), HTableInterfaceReference (org.apache.phoenix.hbase.index.table.HTableInterfaceReference), ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr), Mutation (org.apache.hadoop.hbase.client.Mutation)
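
A minimal, hypothetical caller (for example, inside an IndexWriter subclass): pair each index mutation with the target table's name and let resolveTableReferences group them per table:

List<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<>();
indexUpdates.add(new Pair<Mutation, byte[]>(new Put(Bytes.toBytes("row1")), Bytes.toBytes("MY_INDEX")));
indexUpdates.add(new Pair<Mutation, byte[]>(new Put(Bytes.toBytes("row2")), Bytes.toBytes("MY_INDEX")));
Multimap<HTableInterfaceReference, Mutation> grouped = resolveTableReferences(indexUpdates);
// grouped now holds both Puts under a single HTableInterfaceReference for MY_INDEX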

Example 100 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

In the class ParallelWriterIndexCommitter, the method write:

@Override
public void write(Multimap<HTableInterfaceReference, Mutation> toWrite, final boolean allowLocalUpdates) throws SingleIndexWriteFailureException {
    /*
         * This bit here is a little odd, so let's explain what's going on. Basically, we want to do the writes in
         * parallel to each index table, so each table gets its own task and is submitted to the pool. Where it gets
         * tricky is that we want to block the calling thread until one of two things happens: (1) all index tables get
         * successfully updated, or (2) any one of the index table writes fails; in either case, we should return as
         * quickly as possible. It gets a little more complicated in that if we do get a single failure, any index
         * write that hasn't been started yet (it's been queued up, but not submitted to a thread) should fail
         * immediately, as we know that write is wasted and will need to be replayed anyway.
         */
    Set<Entry<HTableInterfaceReference, Collection<Mutation>>> entries = toWrite.asMap().entrySet();
    TaskBatch<Void> tasks = new TaskBatch<Void>(entries.size());
    for (Entry<HTableInterfaceReference, Collection<Mutation>> entry : entries) {
        // get the mutations for each table. We leak the implementation here a little bit to save
        // doing a complete copy of all the index updates for each table.
        final List<Mutation> mutations = kvBuilder.cloneIfNecessary((List<Mutation>) entry.getValue());
        final HTableInterfaceReference tableReference = entry.getKey();
        if (env != null && !allowLocalUpdates && tableReference.getTableName().equals(env.getRegion().getTableDesc().getNameAsString())) {
            continue;
        }
        /*
             * Write a batch of index updates to an index table. This operation stops (is cancelable) via two
             * mechanisms: (1) setting aborted or stopped on the IndexWriter, or (2) interrupting the running thread.
             * The former will only work if we are not in the midst of writing the current batch to the table, though we
             * do check these status variables before starting and before writing the batch. The latter usage,
             * interrupting the thread, will work in the previous situations as well as at some points while writing the
             * batch, depending on the underlying writer implementation (HTableInterface#batch is blocking, but doesn't
             * elaborate on when it supports an interrupt).
             */
        tasks.add(new Task<Void>() {

            /**
                 * Do the actual write to the index table. We don't need to worry about closing the table because that
                 * is handled by the {@link CachingHTableFactory}.
                 */
            @SuppressWarnings("deprecation")
            @Override
            public Void call() throws Exception {
                // this task may have been queued, and another task in front of us may have
                // failed, so we should exit early if that's the case
                throwFailureIfDone();
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Writing index update:" + mutations + " to table: " + tableReference);
                }
                HTableInterface table = null;
                try {
                    if (allowLocalUpdates && env != null && tableReference.getTableName().equals(env.getRegion().getTableDesc().getNameAsString())) {
                        try {
                            throwFailureIfDone();
                            IndexUtil.writeLocalUpdates(env.getRegion(), mutations, true);
                            return null;
                        } catch (IOException ignored) {
                            // if the local write fails, fall back to the standard (slower) HTable path
                            if (LOG.isTraceEnabled()) {
                                LOG.trace("indexRegion.batchMutate failed; falling back to HTable.batch(). Got error=" + ignored);
                            }
                        }
                    }
                    table = factory.getTable(tableReference.get());
                    throwFailureIfDone();
                    table.batch(mutations);
                } catch (SingleIndexWriteFailureException e) {
                    throw e;
                } catch (IOException e) {
                    throw new SingleIndexWriteFailureException(tableReference.toString(), mutations, e);
                } catch (InterruptedException e) {
                    // reset the interrupt status on the thread
                    Thread.currentThread().interrupt();
                    throw new SingleIndexWriteFailureException(tableReference.toString(), mutations, e);
                } finally {
                    if (table != null) {
                        table.close();
                    }
                }
                return null;
            }

            private void throwFailureIfDone() throws SingleIndexWriteFailureException {
                if (this.isBatchFailed() || Thread.currentThread().isInterrupted()) {
                    throw new SingleIndexWriteFailureException("Pool closed, not attempting to write to the index!", null);
                }
            }
        });
    }
    // actually submit the tasks to the pool and wait for them to finish/fail
    try {
        pool.submitUninterruptible(tasks);
    } catch (EarlyExitFailure e) {
        propagateFailure(e);
    } catch (ExecutionException e) {
        LOG.error("Found a failed index update!");
        propagateFailure(e.getCause());
    }
}
Also used: IOException (java.io.IOException), TaskBatch (org.apache.phoenix.hbase.index.parallel.TaskBatch), HTableInterface (org.apache.hadoop.hbase.client.HTableInterface), ExecutionException (java.util.concurrent.ExecutionException), SingleIndexWriteFailureException (org.apache.phoenix.hbase.index.exception.SingleIndexWriteFailureException), Entry (java.util.Map.Entry), HTableInterfaceReference (org.apache.phoenix.hbase.index.table.HTableInterfaceReference), Collection (java.util.Collection), Mutation (org.apache.hadoop.hbase.client.Mutation), EarlyExitFailure (org.apache.phoenix.hbase.index.parallel.EarlyExitFailure)
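
The fail-fast contract described in the comment at the top of write (block until every write succeeds or any one fails, then abandon queued work) can be sketched in plain Java with an ExecutorCompletionService. This is an analogue of the behavior, not the Phoenix TaskRunner API, and the writes list (one Callable per index table) is hypothetical:

ExecutorService pool = Executors.newFixedThreadPool(4);
CompletionService<Void> completion = new ExecutorCompletionService<Void>(pool);
List<Future<Void>> futures = new ArrayList<Future<Void>>();
for (Callable<Void> write : writes) {
    futures.add(completion.submit(write));
}
try {
    for (int i = 0; i < futures.size(); i++) {
        // take() returns as each write finishes; get() rethrows the first failure
        completion.take().get();
    }
} catch (ExecutionException e) {
    for (Future<Void> f : futures) {
        // interrupt running writes and drop queued ones, mirroring throwFailureIfDone()
        f.cancel(true);
    }
    throw e;
} finally {
    pool.shutdown();
}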

Aggregations

Mutation (org.apache.hadoop.hbase.client.Mutation): 139
Put (org.apache.hadoop.hbase.client.Put): 53
ArrayList (java.util.ArrayList): 46
IOException (java.io.IOException): 35
Delete (org.apache.hadoop.hbase.client.Delete): 32
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 31
List (java.util.List): 28
Cell (org.apache.hadoop.hbase.Cell): 25
Pair (org.apache.hadoop.hbase.util.Pair): 23
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 23
HashMap (java.util.HashMap): 19
PTable (org.apache.phoenix.schema.PTable): 18
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 17
MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse): 15
Region (org.apache.hadoop.hbase.regionserver.Region): 14
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 14
Test (org.junit.Test): 14
MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode): 13
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 12
MutationProto (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto): 12