Example 96 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.

The class PhoenixIndexBuilder, method executeAtomicOp.

@Override
public List<Mutation> executeAtomicOp(Increment inc) throws IOException {
    byte[] opBytes = inc.getAttribute(ATOMIC_OP_ATTRIB);
    if (opBytes == null) {
        // Unexpected
        return null;
    }
    inc.setAttribute(ATOMIC_OP_ATTRIB, null);
    Put put = null;
    Delete delete = null;
    // We can neither use the time stamp in the Increment to set the Get time range
    // nor set the Put/Delete time stamp and have this be atomic, as HBase does not
    // handle that. Though we disallow using the ON DUPLICATE KEY clause when
    // CURRENT_SCN is set, we may still have a time stamp set as of when the table
    // was resolved on the client side. We need to ignore this as well due to limitations
    // in HBase, but this isn't too bad as the time will be very close to the current
    // time anyway.
    long ts = HConstants.LATEST_TIMESTAMP;
    byte[] rowKey = inc.getRow();
    final Get get = new Get(rowKey);
    if (isDupKeyIgnore(opBytes)) {
        get.setFilter(new FirstKeyOnlyFilter());
        Result result = this.env.getRegion().get(get);
        return result.isEmpty() ? convertIncrementToPutInSingletonList(inc) : Collections.<Mutation>emptyList();
    }
    ByteArrayInputStream stream = new ByteArrayInputStream(opBytes);
    DataInputStream input = new DataInputStream(stream);
    boolean skipFirstOp = input.readBoolean();
    short repeat = input.readShort();
    final int[] estimatedSizeHolder = { 0 };
    List<Pair<PTable, List<Expression>>> operations = Lists.newArrayListWithExpectedSize(3);
    while (true) {
        ExpressionVisitor<Void> visitor = new StatelessTraverseAllExpressionVisitor<Void>() {

            @Override
            public Void visit(KeyValueColumnExpression expression) {
                get.addColumn(expression.getColumnFamily(), expression.getColumnQualifier());
                estimatedSizeHolder[0]++;
                return null;
            }
        };
        try {
            int nExpressions = WritableUtils.readVInt(input);
            List<Expression> expressions = Lists.newArrayListWithExpectedSize(nExpressions);
            for (int i = 0; i < nExpressions; i++) {
                Expression expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance();
                expression.readFields(input);
                expressions.add(expression);
                expression.accept(visitor);
            }
            PTableProtos.PTable tableProto = PTableProtos.PTable.parseDelimitedFrom(input);
            PTable table = PTableImpl.createFromProto(tableProto);
            operations.add(new Pair<>(table, expressions));
        } catch (EOFException e) {
            break;
        }
    }
    int estimatedSize = estimatedSizeHolder[0];
    if (get.getFamilyMap().isEmpty()) {
        get.setFilter(new FirstKeyOnlyFilter());
    }
    MultiKeyValueTuple tuple;
    List<Cell> flattenedCells = null;
    List<Cell> cells = ((HRegion) this.env.getRegion()).get(get, false);
    if (cells.isEmpty()) {
        if (skipFirstOp) {
            if (operations.size() <= 1 && repeat <= 1) {
                return convertIncrementToPutInSingletonList(inc);
            }
            // Skip first operation (if first wasn't ON DUPLICATE KEY IGNORE)
            repeat--;
        }
        // Base current state off of new row
        flattenedCells = flattenCells(inc, estimatedSize);
        tuple = new MultiKeyValueTuple(flattenedCells);
    } else {
        // Base current state off of existing row
        tuple = new MultiKeyValueTuple(cells);
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    for (int opIndex = 0; opIndex < operations.size(); opIndex++) {
        Pair<PTable, List<Expression>> operation = operations.get(opIndex);
        PTable table = operation.getFirst();
        List<Expression> expressions = operation.getSecond();
        for (int j = 0; j < repeat; j++) {
            // repeater loop
            ptr.set(rowKey);
            // Sort the list of cells (if they've been flattened, in which case they're not
            // necessarily ordered correctly). We only need the list sorted if the expressions
            // are going to be executed, not when the outer loop is exited. Hence we do it
            // here, at the top of the loop.
            if (flattenedCells != null) {
                Collections.sort(flattenedCells, KeyValue.COMPARATOR);
            }
            PRow row = table.newRow(GenericKeyValueBuilder.INSTANCE, ts, ptr, false);
            int adjust = table.getBucketNum() == null ? 1 : 2;
            for (int i = 0; i < expressions.size(); i++) {
                Expression expression = expressions.get(i);
                ptr.set(ByteUtil.EMPTY_BYTE_ARRAY);
                expression.evaluate(tuple, ptr);
                PColumn column = table.getColumns().get(i + adjust);
                Object value = expression.getDataType().toObject(ptr, column.getSortOrder());
                // We are guaranteed that the two columns will have the same type.
                if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(), expression.getSortOrder(), expression.getMaxLength(), expression.getScale(), column.getMaxLength(), column.getScale())) {
                    throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(), column.getScale());
                }
                column.getDataType().coerceBytes(ptr, value, expression.getDataType(), expression.getMaxLength(), expression.getScale(), expression.getSortOrder(), column.getMaxLength(), column.getScale(), column.getSortOrder(), table.rowKeyOrderOptimizable());
                byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
                row.setValue(column, bytes);
            }
            flattenedCells = Lists.newArrayListWithExpectedSize(estimatedSize);
            List<Mutation> mutations = row.toRowMutations();
            for (Mutation source : mutations) {
                flattenCells(source, flattenedCells);
            }
            tuple.setKeyValues(flattenedCells);
        }
        // Repeat only applies to first statement
        repeat = 1;
    }
    List<Mutation> mutations = Lists.newArrayListWithExpectedSize(2);
    for (int i = 0; i < tuple.size(); i++) {
        Cell cell = tuple.getValue(i);
        if (Type.codeToType(cell.getTypeByte()) == Type.Put) {
            if (put == null) {
                put = new Put(rowKey);
                transferAttributes(inc, put);
                mutations.add(put);
            }
            put.add(cell);
        } else {
            if (delete == null) {
                delete = new Delete(rowKey);
                transferAttributes(inc, delete);
                mutations.add(delete);
            }
            delete.addDeleteMarker(cell);
        }
    }
    return mutations;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) PTable(org.apache.phoenix.schema.PTable) Result(org.apache.hadoop.hbase.client.Result) PRow(org.apache.phoenix.schema.PRow) PColumn(org.apache.phoenix.schema.PColumn) StatelessTraverseAllExpressionVisitor(org.apache.phoenix.expression.visitor.StatelessTraverseAllExpressionVisitor) EOFException(java.io.EOFException) List(java.util.List) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) Cell(org.apache.hadoop.hbase.Cell) Pair(org.apache.hadoop.hbase.util.Pair) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) FirstKeyOnlyFilter(org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter) DataInputStream(java.io.DataInputStream) Put(org.apache.hadoop.hbase.client.Put) PTableProtos(org.apache.phoenix.coprocessor.generated.PTableProtos) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) DataExceedsCapacityException(org.apache.phoenix.exception.DataExceedsCapacityException) ByteArrayInputStream(java.io.ByteArrayInputStream) KeyValueColumnExpression(org.apache.phoenix.expression.KeyValueColumnExpression) Expression(org.apache.phoenix.expression.Expression) Get(org.apache.hadoop.hbase.client.Get) MultiKeyValueTuple(org.apache.phoenix.schema.tuple.MultiKeyValueTuple) Mutation(org.apache.hadoop.hbase.client.Mutation)
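
A recurring idiom in executeAtomicOp above: a single ImmutableBytesWritable (ptr) is re-pointed at different byte ranges as the loops run, and bytes are only materialized (ByteUtil.copyKeyBytesIfNecessary) when a value must outlive the next reuse. Below is a minimal standalone sketch of that pointer-reuse pattern; the class name and values are illustrative, not from Phoenix.

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class PtrReuseSketch {
    public static void main(String[] args) {
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        byte[] row = Bytes.toBytes("row-key-1");
        // set(...) only re-points the wrapper; neither call copies the bytes.
        ptr.set(row);
        ptr.set(row, 4, 5);
        // Copy out before the next ptr.set(...) invalidates this view.
        byte[] detached = ptr.copyBytes();
        System.out.println(Bytes.toString(detached)); // prints "key-1"
    }
}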

Example 97 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.

The class PhoenixIndexFailurePolicy, method getLocalIndexNames.

private Collection<? extends String> getLocalIndexNames(HTableInterfaceReference ref, Collection<Mutation> mutations) throws IOException {
    Set<String> indexTableNames = new HashSet<String>(1);
    PhoenixConnection conn = null;
    try {
        conn = QueryUtil.getConnectionOnServer(this.env.getConfiguration()).unwrap(PhoenixConnection.class);
        PTable dataTable = PhoenixRuntime.getTableNoCache(conn, ref.getTableName());
        List<PTable> indexes = dataTable.getIndexes();
        // A local index is used to extract the view id from an index mutation row key.
        PTable localIndex = null;
        Map<ImmutableBytesWritable, String> localIndexNames = new HashMap<ImmutableBytesWritable, String>();
        for (PTable index : indexes) {
            if (localIndex == null)
                localIndex = index;
            localIndexNames.put(new ImmutableBytesWritable(MetaDataUtil.getViewIndexIdDataType().toBytes(index.getViewIndexId())), index.getName().getString());
        }
        if (localIndex == null) {
            return Collections.emptySet();
        }
        IndexMaintainer indexMaintainer = localIndex.getIndexMaintainer(dataTable, conn);
        HRegionInfo regionInfo = this.env.getRegion().getRegionInfo();
        int offset = regionInfo.getStartKey().length == 0 ? regionInfo.getEndKey().length : regionInfo.getStartKey().length;
        byte[] viewId = null;
        for (Mutation mutation : mutations) {
            viewId = indexMaintainer.getViewIndexIdFromIndexRowKey(new ImmutableBytesWritable(mutation.getRow(), offset, mutation.getRow().length - offset));
            String indexTableName = localIndexNames.get(new ImmutableBytesWritable(viewId));
            if (indexTableName == null) {
                LOG.error("Unable to find local index on " + ref.getTableName() + " with viewID of " + Bytes.toStringBinary(viewId));
            } else {
                indexTableNames.add(indexTableName);
            }
        }
    } catch (ClassNotFoundException e) {
        throw new IOException(e);
    } catch (SQLException e) {
        throw new IOException(e);
    } finally {
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException e) {
                throw new IOException(e);
            }
        }
    }
    return indexTableNames;
}
Also used : PhoenixConnection(org.apache.phoenix.jdbc.PhoenixConnection) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) HashMap(java.util.HashMap) SQLException(java.sql.SQLException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) IOException(java.io.IOException) PTable(org.apache.phoenix.schema.PTable) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) Mutation(org.apache.hadoop.hbase.client.Mutation) HashSet(java.util.HashSet)
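
The map above is keyed on ImmutableBytesWritable rather than raw byte[] because Java arrays compare by reference identity, while ImmutableBytesWritable defines equals() and hashCode() over the wrapped bytes. A short sketch of just that lookup behavior; names and values are illustrative:

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class BytesKeySketch {
    public static void main(String[] args) {
        Map<ImmutableBytesWritable, String> indexNames = new HashMap<>();
        indexNames.put(new ImmutableBytesWritable(Bytes.toBytes((short) 42)), "LOCAL_IDX_A");
        // A different byte[] instance with the same content still matches,
        // since equality is defined over the bytes, not the array reference.
        String hit = indexNames.get(new ImmutableBytesWritable(Bytes.toBytes((short) 42)));
        System.out.println(hit); // prints "LOCAL_IDX_A"
    }
}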

Example 98 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.

The class BaseResultIterators, method clipRange.

/**
 * Truncates the range so that it spans at most rangeSpan fields of the row key
 * @param schema row key schema
 * @param fieldIndex starting index of the field within the row key schema
 * @param rangeSpan maximum number of fields the range may span
 * @param range the key range to clip
 * @return the same range if unchanged, otherwise a new clipped range
 */
public static KeyRange clipRange(RowKeySchema schema, int fieldIndex, int rangeSpan, KeyRange range) {
    if (range == KeyRange.EVERYTHING_RANGE) {
        return range;
    }
    if (range == KeyRange.EMPTY_RANGE) {
        return range;
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    boolean newRange = false;
    boolean lowerUnbound = range.lowerUnbound();
    boolean lowerInclusive = range.isLowerInclusive();
    byte[] lowerRange = range.getLowerRange();
    if (!lowerUnbound && lowerRange.length > 0) {
        if (clipKeyRangeBytes(schema, fieldIndex, rangeSpan, lowerRange, ptr, true)) {
            // Make lower range inclusive since we're decreasing the range by chopping the last part off
            lowerInclusive = true;
            lowerRange = ptr.copyBytes();
            newRange = true;
        }
    }
    boolean upperUnbound = range.upperUnbound();
    boolean upperInclusive = range.isUpperInclusive();
    byte[] upperRange = range.getUpperRange();
    if (!upperUnbound && upperRange.length > 0) {
        if (clipKeyRangeBytes(schema, fieldIndex, rangeSpan, upperRange, ptr, false)) {
            // Make upper range inclusive since we're increasing the range by chopping the last part off
            upperInclusive = true;
            upperRange = ptr.copyBytes();
            newRange = true;
        }
    }
    return newRange ? KeyRange.getKeyRange(lowerRange, lowerInclusive, upperRange, upperInclusive) : range;
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable)
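
In clipRange, ptr serves as an out-parameter: clipKeyRangeBytes points it at the clipped prefix and signals via its boolean return whether anything changed, after which copyBytes() detaches the prefix from the original key. A sketch of the idiom with a hypothetical clipTo helper standing in for clipKeyRangeBytes:

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class ClipSketch {
    // Hypothetical stand-in for clipKeyRangeBytes: keep only the first n bytes,
    // returning true if the key was actually clipped.
    static boolean clipTo(byte[] key, int n, ImmutableBytesWritable ptr) {
        if (key.length <= n) {
            return false;
        }
        ptr.set(key, 0, n);
        return true;
    }

    public static void main(String[] args) {
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        byte[] key = Bytes.toBytes("tenant1|2017-06");
        if (clipTo(key, 7, ptr)) {
            // Detach the clipped prefix so later reuse of ptr cannot affect it.
            byte[] clipped = ptr.copyBytes();
            System.out.println(Bytes.toString(clipped)); // prints "tenant1"
        }
    }
}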

Example 99 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.

The class ArrayConcatFunctionTest, method testForCorrectSeparatorBytes2.

@Test
public void testForCorrectSeparatorBytes2() throws Exception {
    Object[] o1 = new Object[] { "a", "b" };
    Object[] o2 = new Object[] { "c", "d", "e" };
    PDataType type = PVarcharArray.INSTANCE;
    PDataType base = PVarchar.INSTANCE;
    PhoenixArray arr1 = new PhoenixArray(base, o1);
    PhoenixArray arr2 = new PhoenixArray(base, o2);
    LiteralExpression array1Literal, array2Literal;
    array1Literal = LiteralExpression.newConstant(arr1, type, null, null, SortOrder.ASC, Determinism.ALWAYS);
    array2Literal = LiteralExpression.newConstant(arr2, type, null, null, SortOrder.DESC, Determinism.ALWAYS);
    List<Expression> expressions = Lists.newArrayList((Expression) array1Literal);
    expressions.add(array2Literal);
    Expression arrayConcatFunction = new ArrayConcatFunction(expressions);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    arrayConcatFunction.evaluate(null, ptr);
    byte[] expected = new byte[] { 97, 0, 98, 0, 99, 0, 100, 0, 101, 0, 0, 0, -128, 1, -128, 3, -128, 5, -128, 7, -128, 9, 0, 0, 0, 12, 0, 0, 0, 5, 1 };
    assertArrayEquals(expected, ptr.get());
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) PDataType(org.apache.phoenix.schema.types.PDataType) PhoenixArray(org.apache.phoenix.schema.types.PhoenixArray) ArrayConcatFunction(org.apache.phoenix.expression.function.ArrayConcatFunction) Test(org.junit.Test)

Example 100 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.

The class ArrayConcatFunctionTest, method testForCorrectSeparatorBytes4.

@Test
public void testForCorrectSeparatorBytes4() throws Exception {
    Object[] o1 = new Object[] { "a", "b", null };
    Object[] o2 = new Object[] { null, "c", "d", "e" };
    PDataType type = PVarcharArray.INSTANCE;
    PDataType base = PVarchar.INSTANCE;
    PhoenixArray arr1 = new PhoenixArray(base, o1);
    PhoenixArray arr2 = new PhoenixArray(base, o2);
    LiteralExpression array1Literal, array2Literal;
    array1Literal = LiteralExpression.newConstant(arr1, type, null, null, SortOrder.ASC, Determinism.ALWAYS);
    array2Literal = LiteralExpression.newConstant(arr2, type, null, null, SortOrder.DESC, Determinism.ALWAYS);
    List<Expression> expressions = Lists.newArrayList((Expression) array1Literal);
    expressions.add(array2Literal);
    Expression arrayConcatFunction = new ArrayConcatFunction(expressions);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    arrayConcatFunction.evaluate(null, ptr);
    byte[] expected = new byte[] { 97, 0, 98, 0, 0, -2, 99, 0, 100, 0, 101, 0, 0, 0, -128, 1, -128, 3, -128, 5, -128, 5, -128, 7, -128, 9, -128, 11, 0, 0, 0, 14, 0, 0, 0, 7, 1 };
    assertArrayEquals(expected, ptr.get());
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) PDataType(org.apache.phoenix.schema.types.PDataType) PhoenixArray(org.apache.phoenix.schema.types.PhoenixArray) ArrayConcatFunction(org.apache.phoenix.expression.function.ArrayConcatFunction) Test(org.junit.Test)
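
Both tests assert on Phoenix's serialized VARCHAR ARRAY form (null-separated element bytes followed by trailing offset bookkeeping), which is hard to read. An alternative, untested sketch for the body of testForCorrectSeparatorBytes4 that decodes ptr back through PVarcharArray and asserts on the logical elements instead; it assumes the test's existing classpath and that the default (ascending) deserialization applies to the concatenated result:

ImmutableBytesWritable ptr = new ImmutableBytesWritable();
arrayConcatFunction.evaluate(null, ptr);
// Decode the serialized array instead of comparing raw bytes (untested sketch).
PhoenixArray result = (PhoenixArray) PVarcharArray.INSTANCE.toObject(
        ptr.get(), ptr.getOffset(), ptr.getLength());
Object[] elements = (Object[]) result.getArray();
assertArrayEquals(new Object[] { "a", "b", null, null, "c", "d", "e" }, elements);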

Aggregations

ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 296
Test (org.junit.Test): 86
Expression (org.apache.phoenix.expression.Expression): 36
IOException (java.io.IOException): 33
PhoenixArray (org.apache.phoenix.schema.types.PhoenixArray): 30
ArrayList (java.util.ArrayList): 28
Configuration (org.apache.hadoop.conf.Configuration): 28
Result (org.apache.hadoop.hbase.client.Result): 28
Cell (org.apache.hadoop.hbase.Cell): 27
KeyValue (org.apache.hadoop.hbase.KeyValue): 27
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 27
PTable (org.apache.phoenix.schema.PTable): 27
PDataType (org.apache.phoenix.schema.types.PDataType): 26
PSmallint (org.apache.phoenix.schema.types.PSmallint): 25
PTinyint (org.apache.phoenix.schema.types.PTinyint): 23
Put (org.apache.hadoop.hbase.client.Put): 20
PUnsignedSmallint (org.apache.phoenix.schema.types.PUnsignedSmallint): 20
PUnsignedTinyint (org.apache.phoenix.schema.types.PUnsignedTinyint): 20
PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection): 19
List (java.util.List): 18