
Example 61 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

The class SequenceRegionObserver, method preAppend:

/**
     * Override the preAppend for checkAndPut and checkAndDelete, as we need the ability to
     * a) set the TimeRange for the Get being done and
     * b) return something back to the client to indicate success/failure
     */
@SuppressWarnings("deprecation")
@Override
public Result preAppend(final ObserverContext<RegionCoprocessorEnvironment> e, final Append append) throws IOException {
    byte[] opBuf = append.getAttribute(OPERATION_ATTRIB);
    if (opBuf == null) {
        return null;
    }
    Sequence.MetaOp op = Sequence.MetaOp.values()[opBuf[0]];
    Cell keyValue = append.getFamilyCellMap().values().iterator().next().iterator().next();
    long clientTimestamp = HConstants.LATEST_TIMESTAMP;
    long minGetTimestamp = MetaDataProtocol.MIN_TABLE_TIMESTAMP;
    long maxGetTimestamp = HConstants.LATEST_TIMESTAMP;
    boolean hadClientTimestamp;
    byte[] clientTimestampBuf = null;
    if (op == Sequence.MetaOp.RETURN_SEQUENCE) {
        // When returning sequences, this allows us to send the expected timestamp
        // of the sequence to make sure we don't reset any other sequence
        hadClientTimestamp = true;
        clientTimestamp = minGetTimestamp = keyValue.getTimestamp();
        maxGetTimestamp = minGetTimestamp + 1;
    } else {
        clientTimestampBuf = append.getAttribute(MAX_TIMERANGE_ATTRIB);
        if (clientTimestampBuf != null) {
            clientTimestamp = maxGetTimestamp = Bytes.toLong(clientTimestampBuf);
        }
        hadClientTimestamp = (clientTimestamp != HConstants.LATEST_TIMESTAMP);
        if (hadClientTimestamp) {
            // For CREATE_SEQUENCE, extend the Get's time range to include the client
            // timestamp so a sequence already created at that timestamp is detected.
            if (op == Sequence.MetaOp.CREATE_SEQUENCE) {
                maxGetTimestamp = clientTimestamp + 1;
            }
        } else {
            clientTimestamp = maxGetTimestamp = EnvironmentEdgeManager.currentTimeMillis();
            clientTimestampBuf = Bytes.toBytes(clientTimestamp);
        }
    }
    RegionCoprocessorEnvironment env = e.getEnvironment();
    // We need to set this to prevent region.append from being called
    e.bypass();
    e.complete();
    Region region = env.getRegion();
    byte[] row = append.getRow();
    List<RowLock> locks = Lists.newArrayList();
    region.startRegionOperation();
    try {
        acquireLock(region, row, locks);
        try {
            byte[] family = CellUtil.cloneFamily(keyValue);
            byte[] qualifier = CellUtil.cloneQualifier(keyValue);
            Get get = new Get(row);
            get.setTimeRange(minGetTimestamp, maxGetTimestamp);
            get.addColumn(family, qualifier);
            Result result = region.get(get);
            if (result.isEmpty()) {
                if (op == Sequence.MetaOp.DROP_SEQUENCE || op == Sequence.MetaOp.RETURN_SEQUENCE) {
                    return getErrorResult(row, clientTimestamp, SQLExceptionCode.SEQUENCE_UNDEFINED.getErrorCode());
                }
            } else {
                if (op == Sequence.MetaOp.CREATE_SEQUENCE) {
                    return getErrorResult(row, clientTimestamp, SQLExceptionCode.SEQUENCE_ALREADY_EXIST.getErrorCode());
                }
            }
            Mutation m = null;
            switch(op) {
                case RETURN_SEQUENCE:
                    KeyValue currentValueKV = result.raw()[0];
                    long expectedValue = PLong.INSTANCE.getCodec().decodeLong(append.getAttribute(CURRENT_VALUE_ATTRIB), 0, SortOrder.getDefault());
                    long value = PLong.INSTANCE.getCodec().decodeLong(currentValueKV.getValueArray(), currentValueKV.getValueOffset(), SortOrder.getDefault());
                    // Timestamp should match exactly, or we may have the wrong sequence
                    if (expectedValue != value || currentValueKV.getTimestamp() != clientTimestamp) {
                        return Result.create(Collections.singletonList((Cell) KeyValueUtil.newKeyValue(row, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, currentValueKV.getTimestamp(), ByteUtil.EMPTY_BYTE_ARRAY)));
                    }
                    m = new Put(row, currentValueKV.getTimestamp());
                    m.getFamilyCellMap().putAll(append.getFamilyCellMap());
                    break;
                case DROP_SEQUENCE:
                    m = new Delete(row, clientTimestamp);
                    break;
                case CREATE_SEQUENCE:
                    m = new Put(row, clientTimestamp);
                    m.getFamilyCellMap().putAll(append.getFamilyCellMap());
                    break;
            }
            if (!hadClientTimestamp) {
                for (List<Cell> kvs : m.getFamilyCellMap().values()) {
                    for (Cell kv : kvs) {
                        ((KeyValue) kv).updateLatestStamp(clientTimestampBuf);
                    }
                }
            }
            Mutation[] mutations = new Mutation[] { m };
            region.batchMutate(mutations, HConstants.NO_NONCE, HConstants.NO_NONCE);
            long serverTimestamp = MetaDataUtil.getClientTimeStamp(m);
            // Return a single-cell Result whose timestamp tells the client when the
            // mutation was actually performed on the server.
            return Result.create(Collections.singletonList((Cell) KeyValueUtil.newKeyValue(row, PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES, serverTimestamp, SUCCESS_VALUE)));
        } finally {
            region.releaseRowLocks(locks);
        }
    } catch (Throwable t) {
        ServerUtil.throwIOException("Increment of sequence " + Bytes.toStringBinary(row), t);
        // Impossible
        return null;
    } finally {
        region.closeRegionOperation();
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) KeyValue(org.apache.hadoop.hbase.KeyValue) Sequence(org.apache.phoenix.schema.Sequence) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) RegionCoprocessorEnvironment(org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment) Get(org.apache.hadoop.hbase.client.Get) Region(org.apache.hadoop.hbase.regionserver.Region) Mutation(org.apache.hadoop.hbase.client.Mutation) Cell(org.apache.hadoop.hbase.Cell) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock)
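
For context, preAppend above is driven entirely by attributes on the client's Append rather than by the Append cells themselves. Below is a minimal sketch of how such a request might be built; the attribute names mirror the constants read in the coprocessor, but their literal string values and the exact encodings Phoenix uses are assumptions here.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical helper illustrating the kind of Append that SequenceRegionObserver.preAppend intercepts.
public class SequenceAppendSketch {

    // Assumed attribute names; the real constants live in SequenceRegionObserver.
    static final String OPERATION_ATTRIB = "SEQUENCE_OPERATION";
    static final String MAX_TIMERANGE_ATTRIB = "MAX_TIMERANGE";

    public static Result createSequence(HTableInterface table, byte[] row, byte[] family,
            byte[] qualifier, long startValue, long clientTimestamp, int metaOpOrdinal) throws IOException {
        Append append = new Append(row);
        // The operation is a single byte holding the MetaOp ordinal (preAppend does MetaOp.values()[opBuf[0]]).
        append.setAttribute(OPERATION_ATTRIB, new byte[] { (byte) metaOpOrdinal });
        // Upper bound for the server-side Get's time range.
        append.setAttribute(MAX_TIMERANGE_ATTRIB, Bytes.toBytes(clientTimestamp));
        // preAppend reads family, qualifier, and timestamp from the first cell of the Append.
        append.add(family, qualifier, Bytes.toBytes(startValue));
        // The coprocessor bypasses the real append and returns its own SUCCESS or error Result.
        return table.append(append);
    }
}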

Example 62 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

The class UngroupedAggregateRegionObserver, method commitBatchWithHTable:

private void commitBatchWithHTable(HTable table, Region region, List<Mutation> mutations, byte[] indexUUID, long blockingMemstoreSize, byte[] indexMaintainersPtr, byte[] txState, boolean useIndexProto) throws IOException {
    if (indexUUID != null) {
        // Need to add indexMaintainers for each mutation as table.batch can be distributed across servers
        for (Mutation m : mutations) {
            if (indexMaintainersPtr != null) {
                m.setAttribute(useIndexProto ? PhoenixIndexCodec.INDEX_PROTO_MD : PhoenixIndexCodec.INDEX_MD, indexMaintainersPtr);
            }
            if (txState != null) {
                m.setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
            }
            m.setAttribute(PhoenixIndexCodec.INDEX_UUID, indexUUID);
        }
    }
    // If the memstore has grown past blockingMemstoreSize, wait (up to 30 x 100 ms, about 3 seconds)
    // for a flush to shrink it before issuing more writes to the region.
    for (int i = 0; region.getMemstoreSize() > blockingMemstoreSize && i < 30; i++) {
        try {
            checkForRegionClosing();
            Thread.sleep(100);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IOException(e);
        }
    }
    logger.debug("Committing batch of " + mutations.size() + " mutations for " + table);
    try {
        table.batch(mutations);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}
Also used : Mutation(org.apache.hadoop.hbase.client.Mutation) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException)
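
The wait loop above applies simple back-pressure: it polls the memstore size up to 30 times at 100 ms intervals, so a blocked batch waits at most about 3 seconds before being committed anyway. As a side note, the single-argument table.batch(List) used here is deprecated in newer HBase client APIs; a hedged sketch of committing the same list with the overload that reports per-action results might look like this.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Mutation;

// Sketch only: the two-argument batch overload fills a results array with the outcome of each action,
// which can help when the batch is spread across many region servers and only some actions fail.
public class BatchCommitSketch {
    public static Object[] commit(HTable table, List<Mutation> mutations) throws IOException {
        Object[] results = new Object[mutations.size()];
        try {
            table.batch(mutations, results);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IOException(e);
        }
        return results; // each slot holds the corresponding action's outcome (a Result on success)
    }
}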

Example 63 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

The class UngroupedAggregateRegionObserver, method rebuildIndices:

private RegionScanner rebuildIndices(final RegionScanner innerScanner, final Region region, final Scan scan, Configuration config) throws IOException {
    byte[] indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD);
    boolean useProto = true;
    // For backward compatibility, fall back to looking up the old attribute.
    if (indexMetaData == null) {
        useProto = false;
        indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_MD);
    }
    boolean hasMore;
    int rowCount = 0;
    try {
        int maxBatchSize = config.getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
        long maxBatchSizeBytes = config.getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES);
        MutationList mutations = new MutationList(maxBatchSize);
        region.startRegionOperation();
        byte[] uuidValue = ServerCacheClient.generateId();
        synchronized (innerScanner) {
            do {
                List<Cell> results = new ArrayList<Cell>();
                hasMore = innerScanner.nextRaw(results);
                if (!results.isEmpty()) {
                    Put put = null;
                    Delete del = null;
                    for (Cell cell : results) {
                        if (KeyValue.Type.codeToType(cell.getTypeByte()) == KeyValue.Type.Put) {
                            if (put == null) {
                                put = new Put(CellUtil.cloneRow(cell));
                                put.setAttribute(useProto ? PhoenixIndexCodec.INDEX_PROTO_MD : PhoenixIndexCodec.INDEX_MD, indexMetaData);
                                put.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                                put.setAttribute(BaseScannerRegionObserver.IGNORE_NEWER_MUTATIONS, PDataType.TRUE_BYTES);
                                mutations.add(put);
                            }
                            put.add(cell);
                        } else {
                            if (del == null) {
                                del = new Delete(CellUtil.cloneRow(cell));
                                del.setAttribute(useProto ? PhoenixIndexCodec.INDEX_PROTO_MD : PhoenixIndexCodec.INDEX_MD, indexMetaData);
                                del.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
                                del.setAttribute(BaseScannerRegionObserver.IGNORE_NEWER_MUTATIONS, PDataType.TRUE_BYTES);
                                mutations.add(del);
                            }
                            del.addDeleteMarker(cell);
                        }
                    }
                    if (readyToCommit(rowCount, mutations.byteSize(), maxBatchSize, maxBatchSizeBytes)) {
                        region.batchMutate(mutations.toArray(new Mutation[mutations.size()]), HConstants.NO_NONCE, HConstants.NO_NONCE);
                        uuidValue = ServerCacheClient.generateId();
                        mutations.clear();
                    }
                    rowCount++;
                }
            } while (hasMore);
            if (!mutations.isEmpty()) {
                region.batchMutate(mutations.toArray(new Mutation[mutations.size()]), HConstants.NO_NONCE, HConstants.NO_NONCE);
            }
        }
    } catch (IOException e) {
        logger.error("IOException during rebuilding: " + Throwables.getStackTraceAsString(e));
        throw e;
    } finally {
        region.closeRegionOperation();
    }
    byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
    final KeyValue aggKeyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    RegionScanner scanner = new BaseRegionScanner(innerScanner) {

        @Override
        public HRegionInfo getRegionInfo() {
            return region.getRegionInfo();
        }

        @Override
        public boolean isFilterDone() {
            return true;
        }

        @Override
        public void close() throws IOException {
        // no-op because we want to manage closing of the inner scanner ourselves.
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            results.add(aggKeyValue);
            return false;
        }

        @Override
        public long getMaxResultSize() {
            return scan.getMaxResultSize();
        }
    };
    return scanner;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) KeyValue(org.apache.hadoop.hbase.KeyValue) ArrayList(java.util.ArrayList) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Put(org.apache.hadoop.hbase.client.Put) RegionScanner(org.apache.hadoop.hbase.regionserver.RegionScanner) EncodedColumnQualiferCellsList(org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList) ArrayList(java.util.ArrayList) List(java.util.List) Mutation(org.apache.hadoop.hbase.client.Mutation) Cell(org.apache.hadoop.hbase.Cell)
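
The flush decision above is delegated to a readyToCommit helper that is not part of this excerpt; judging by how it is called, it presumably compares the running row count and the accumulated mutation byte size against the two configured limits, roughly as in this sketch.

// Hedged sketch of the batching predicate used in rebuildIndices (the real readyToCommit is not shown here).
public class BatchThresholdSketch {
    static boolean readyToCommit(int rowCount, long byteSize, int maxBatchSize, long maxBatchSizeBytes) {
        // Flush once either the row-count limit or the byte-size limit has been reached.
        return rowCount >= maxBatchSize || byteSize >= maxBatchSizeBytes;
    }
}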

Example 64 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

The class MetaDataEndpointImpl, method dropFunction:

@Override
public void dropFunction(RpcController controller, DropFunctionRequest request, RpcCallback<MetaDataResponse> done) {
    byte[][] rowKeyMetaData = new byte[2][];
    byte[] functionName = null;
    try {
        List<Mutation> functionMetaData = ProtobufUtil.getMutations(request);
        MetaDataUtil.getTenantIdAndFunctionName(functionMetaData, rowKeyMetaData);
        byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
        functionName = rowKeyMetaData[PhoenixDatabaseMetaData.FUNTION_NAME_INDEX];
        byte[] lockKey = SchemaUtil.getFunctionKey(tenantIdBytes, functionName);
        Region region = env.getRegion();
        MetaDataMutationResult result = checkFunctionKeyInRegion(lockKey, region);
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
            return;
        }
        List<RowLock> locks = Lists.newArrayList();
        long clientTimeStamp = MetaDataUtil.getClientTimeStamp(functionMetaData);
        try {
            acquireLock(region, lockKey, locks);
            List<byte[]> keys = new ArrayList<byte[]>(1);
            keys.add(lockKey);
            List<ImmutableBytesPtr> invalidateList = new ArrayList<ImmutableBytesPtr>();
            result = doDropFunction(clientTimeStamp, keys, functionMetaData, invalidateList);
            if (result.getMutationCode() != MutationCode.FUNCTION_ALREADY_EXISTS) {
                done.run(MetaDataMutationResult.toProto(result));
                return;
            }
            region.mutateRowsWithLocks(functionMetaData, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            long currentTime = MetaDataUtil.getClientTimeStamp(functionMetaData);
            for (ImmutableBytesPtr ptr : invalidateList) {
                metaDataCache.invalidate(ptr);
                metaDataCache.put(ptr, newDeletedFunctionMarker(currentTime));
            }
            done.run(MetaDataMutationResult.toProto(result));
            return;
        } finally {
            region.releaseRowLocks(locks);
        }
    } catch (Throwable t) {
        logger.error("dropFunction failed", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(Bytes.toString(functionName), t));
    }
}
Also used : ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ArrayList(java.util.ArrayList) PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity) Region(org.apache.hadoop.hbase.regionserver.Region) Mutation(org.apache.hadoop.hbase.client.Mutation) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock)
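
dropFunction (and dropTable in the next example) follows the same server-side metadata pattern: lock the key, apply the metadata mutations atomically with mutateRowsWithLocks, update or invalidate the cached metadata, and release the row locks in a finally block. A stripped-down, hedged skeleton of that pattern, with the validation and cache steps elided, is sketched below; the acquireLock helper shown is a hypothetical stand-in for the endpoint's own helper, which is not part of this excerpt.

import java.io.IOException;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.regionserver.Region;
import org.apache.hadoop.hbase.regionserver.Region.RowLock;
import org.apache.hadoop.hbase.util.Bytes;

import com.google.common.collect.Lists;

// Hedged skeleton of the lock/mutate/release shape used by the MetaDataEndpointImpl drop methods.
public class MetaDataMutationSkeleton {

    public static void applyWithLock(Region region, byte[] lockKey, List<Mutation> metadata) throws IOException {
        List<RowLock> locks = Lists.newArrayList();
        try {
            acquireLock(region, lockKey, locks);
            // Apply all metadata mutations atomically while holding the row lock.
            region.mutateRowsWithLocks(metadata, Collections.<byte[]>emptySet(),
                    HConstants.NO_NONCE, HConstants.NO_NONCE);
            // ... invalidate or update cached metadata here ...
        } finally {
            region.releaseRowLocks(locks);
        }
    }

    // Hypothetical stand-in for the endpoint's acquireLock helper (not shown in this excerpt).
    private static void acquireLock(Region region, byte[] key, List<RowLock> locks) throws IOException {
        RowLock lock = region.getRowLock(key, false);
        if (lock == null) {
            throw new IOException("Failed to acquire lock on " + Bytes.toStringBinary(key));
        }
        locks.add(lock);
    }
}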

Example 65 with Mutation

Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.

The class MetaDataEndpointImpl, method dropTable:

@Override
public void dropTable(RpcController controller, DropTableRequest request, RpcCallback<MetaDataResponse> done) {
    MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
    boolean isCascade = request.getCascade();
    byte[][] rowKeyMetaData = new byte[3][];
    String tableType = request.getTableType();
    byte[] schemaName = null;
    byte[] tableName = null;
    try {
        List<Mutation> tableMetadata = ProtobufUtil.getMutations(request);
        MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData);
        byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
        schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
        tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
        // Disallow deletion of a system table
        if (tableType.equals(PTableType.SYSTEM.getSerializedValue())) {
            builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION);
            builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
            done.run(builder.build());
            return;
        }
        List<byte[]> tableNamesToDelete = Lists.newArrayList();
        List<SharedTableState> sharedTablesToDelete = Lists.newArrayList();
        byte[] parentTableName = MetaDataUtil.getParentTableName(tableMetadata);
        byte[] lockTableName = parentTableName == null ? tableName : parentTableName;
        byte[] lockKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName, lockTableName);
        byte[] key = parentTableName == null ? lockKey : SchemaUtil.getTableKey(tenantIdBytes, schemaName, tableName);
        Region region = env.getRegion();
        MetaDataMutationResult result = checkTableKeyInRegion(key, region);
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
            return;
        }
        List<RowLock> locks = Lists.newArrayList();
        try {
            acquireLock(region, lockKey, locks);
            if (key != lockKey) {
                acquireLock(region, key, locks);
            }
            List<ImmutableBytesPtr> invalidateList = new ArrayList<ImmutableBytesPtr>();
            result = doDropTable(key, tenantIdBytes, schemaName, tableName, parentTableName, PTableType.fromSerializedValue(tableType), tableMetadata, invalidateList, locks, tableNamesToDelete, sharedTablesToDelete, isCascade);
            if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                done.run(MetaDataMutationResult.toProto(result));
                return;
            }
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            // Commit the list of deletions.
            region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]>emptySet(), HConstants.NO_NONCE, HConstants.NO_NONCE);
            long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata);
            for (ImmutableBytesPtr ckey : invalidateList) {
                metaDataCache.put(ckey, newDeletedTableMarker(currentTime));
            }
            if (parentTableName != null) {
                ImmutableBytesPtr parentCacheKey = new ImmutableBytesPtr(lockKey);
                metaDataCache.invalidate(parentCacheKey);
            }
            done.run(MetaDataMutationResult.toProto(result));
            return;
        } finally {
            region.releaseRowLocks(locks);
        }
    } catch (Throwable t) {
        logger.error("dropTable failed", t);
        ProtobufUtil.setControllerException(controller, ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
    }
}
Also used : MetaDataResponse(org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) ArrayList(java.util.ArrayList) ByteString(com.google.protobuf.ByteString) PMetaDataEntity(org.apache.phoenix.schema.PMetaDataEntity) Region(org.apache.hadoop.hbase.regionserver.Region) Mutation(org.apache.hadoop.hbase.client.Mutation) RowLock(org.apache.hadoop.hbase.regionserver.Region.RowLock)

Aggregations

Mutation (org.apache.hadoop.hbase.client.Mutation): 139 usages
Put (org.apache.hadoop.hbase.client.Put): 53 usages
ArrayList (java.util.ArrayList): 46 usages
IOException (java.io.IOException): 35 usages
Delete (org.apache.hadoop.hbase.client.Delete): 32 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 31 usages
List (java.util.List): 28 usages
Cell (org.apache.hadoop.hbase.Cell): 25 usages
Pair (org.apache.hadoop.hbase.util.Pair): 23 usages
MetaDataMutationResult (org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult): 23 usages
HashMap (java.util.HashMap): 19 usages
PTable (org.apache.phoenix.schema.PTable): 18 usages
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 17 usages
MetaDataResponse (org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataResponse): 15 usages
Region (org.apache.hadoop.hbase.regionserver.Region): 14 usages
RowLock (org.apache.hadoop.hbase.regionserver.Region.RowLock): 14 usages
Test (org.junit.Test): 14 usages
MutationCode (org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode): 13 usages
HTableInterface (org.apache.hadoop.hbase.client.HTableInterface): 12 usages
MutationProto (org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto): 12 usages