
Example 41 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.

Class MultiHfileOutputFormat, method getRegionStartKeys.

/**
 * Return the start keys of all of the regions in this table,
 * as a set of TableRowkeyPair (table name paired with region start key).
 */
private static Set<TableRowkeyPair> getRegionStartKeys(String tableName, RegionLocator table) throws IOException {
    byte[][] byteKeys = table.getStartKeys();
    Set<TableRowkeyPair> ret = new TreeSet<TableRowkeyPair>();
    for (byte[] byteKey : byteKeys) {
        // PHOENIX-2216: pass the table name along with each region start key
        ret.add(new TableRowkeyPair(tableName, new ImmutableBytesWritable(byteKey)));
    }
    return ret;
}
Also used: TableRowkeyPair (org.apache.phoenix.mapreduce.bulkload.TableRowkeyPair), ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), TreeSet (java.util.TreeSet)
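
ImmutableBytesWritable implements WritableComparable, which is what lets the TableRowkeyPair instances carrying region start keys live in an ordered TreeSet. A minimal standalone sketch of that ordering behavior (the key values here are illustrative, not from Phoenix):

import java.util.TreeSet;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;

public class StartKeyOrderingSketch {
    public static void main(String[] args) {
        // Region start keys sort lexicographically; the first region's start
        // key is the empty byte array, so it sorts first.
        TreeSet<ImmutableBytesWritable> keys = new TreeSet<ImmutableBytesWritable>();
        keys.add(new ImmutableBytesWritable(Bytes.toBytes("m")));
        keys.add(new ImmutableBytesWritable(Bytes.toBytes("a")));
        keys.add(new ImmutableBytesWritable(new byte[0]));
        for (ImmutableBytesWritable k : keys) {
            System.out.println(Bytes.toStringBinary(k.get(), k.getOffset(), k.getLength()));
        }
    }
}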

Example 42 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.

Class StatisticsWriter, method addStats.

/**
 * Update a list of statistics for a given region. If an UPDATE STATISTICS <tablename>
 * query is issued, the table is updated via UPSERT queries; if the region is split or a
 * major compaction happens, the update is done using HTable.put().
 *
 * @param tracker
 *            - the statistics tracker
 * @param cfKey
 *            - the column family for which the stats are being collected
 * @param mutations
 *            - list that collects all the mutations to commit in a batch
 * @throws IOException
 *             if any of the puts fail. A single failure prevents any further attempts to
 *             update the remaining list of stats
 */
@SuppressWarnings("deprecation")
public void addStats(StatisticsCollector tracker, ImmutableBytesPtr cfKey, List<Mutation> mutations) throws IOException {
    if (tracker == null) {
        return;
    }
    boolean useMaxTimeStamp = clientTimeStamp == DefaultStatisticsCollector.NO_TIMESTAMP;
    long timeStamp = clientTimeStamp;
    if (useMaxTimeStamp) {
        // When using the max timestamp, we write the update time later
        // because we only know the ts now
        timeStamp = tracker.getMaxTimeStamp();
        mutations.add(getLastStatsUpdatedTimePut(timeStamp));
    }
    GuidePostsInfo gps = tracker.getGuidePosts(cfKey);
    if (gps != null) {
        long[] byteCounts = gps.getByteCounts();
        long[] rowCounts = gps.getRowCounts();
        ImmutableBytesWritable keys = gps.getGuidePosts();
        boolean hasGuidePosts = keys.getLength() > 0;
        if (hasGuidePosts) {
            int guidePostCount = 0;
            try (ByteArrayInputStream stream = new ByteArrayInputStream(keys.get(), keys.getOffset(), keys.getLength())) {
                DataInput input = new DataInputStream(stream);
                PrefixByteDecoder decoder = new PrefixByteDecoder(gps.getMaxLength());
                do {
                    ImmutableBytesWritable ptr = decoder.decode(input);
                    addGuidepost(cfKey, mutations, ptr, byteCounts[guidePostCount], rowCounts[guidePostCount], timeStamp);
                    guidePostCount++;
                } while (decoder != null); // always true: the loop exits when decode() throws EOFException
            } catch (EOFException e) {
                // Ignore, as this signifies we're done
            }
            // If we've written guideposts with a guidepost key, then delete the
            // empty guidepost indicator that may have been written by other
            // regions.
            byte[] rowKey = StatisticsUtil.getRowKey(tableName, cfKey, ByteUtil.EMPTY_IMMUTABLE_BYTE_ARRAY);
            Delete delete = new Delete(rowKey, timeStamp);
            mutations.add(delete);
        } else {
            addGuidepost(cfKey, mutations, ByteUtil.EMPTY_IMMUTABLE_BYTE_ARRAY, 0, 0, timeStamp);
        }
    }
}
Also used: PrefixByteDecoder (org.apache.phoenix.util.PrefixByteDecoder), Delete (org.apache.hadoop.hbase.client.Delete), ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), DataInputStream (java.io.DataInputStream), DataInput (java.io.DataInput), ByteArrayInputStream (java.io.ByteArrayInputStream), EOFException (java.io.EOFException)
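
The decode loop above relies on EOFException as its normal termination signal: the guidepost stream carries no element count, so decode() is called until the input runs dry. A minimal sketch of that pattern in isolation, assuming the bytes were produced by the matching prefix encoder (decodeAll is a hypothetical helper, not Phoenix API):

import java.io.ByteArrayInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.util.PrefixByteDecoder;

public class GuidePostDecodeSketch {
    static List<byte[]> decodeAll(byte[] encoded, int maxLength) throws IOException {
        List<byte[]> keys = new ArrayList<byte[]>();
        try (ByteArrayInputStream stream = new ByteArrayInputStream(encoded)) {
            DataInput input = new DataInputStream(stream);
            PrefixByteDecoder decoder = new PrefixByteDecoder(maxLength);
            while (true) {
                // Copy each key out, since the decoder may reuse its buffer.
                ImmutableBytesWritable ptr = decoder.decode(input);
                keys.add(ptr.copyBytes());
            }
        } catch (EOFException e) {
            // Expected: end of the encoded stream, not an error.
        }
        return keys;
    }
}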

Example 43 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.

Class ArrayAppendFunctionTest, method testForCorrectSeparatorBytes3.

@Test
public void testForCorrectSeparatorBytes3() throws Exception {
    Object[] o = new Object[] { "a", null, null, "c" };
    Object element = "d";
    PDataType baseType = PVarchar.INSTANCE;
    PhoenixArray arr = new PhoenixArray(baseType, o);
    LiteralExpression arrayLiteral, elementLiteral;
    arrayLiteral = LiteralExpression.newConstant(arr, PVarcharArray.INSTANCE, null, null, SortOrder.DESC, Determinism.ALWAYS);
    elementLiteral = LiteralExpression.newConstant(element, baseType, null, null, SortOrder.ASC, Determinism.ALWAYS);
    List<Expression> expressions = Lists.newArrayList((Expression) arrayLiteral);
    expressions.add(elementLiteral);
    Expression arrayAppendFunction = new ArrayAppendFunction(expressions);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    arrayAppendFunction.evaluate(null, ptr);
    byte[] expected = new byte[] { -98, -1, 0, -2, -100, -1, -101, -1, -1, -1, -128, 1, -128, 3, -128, 3, -128, 5, -128, 7, 0, 0, 0, 10, 0, 0, 0, 5, 1 };
    assertArrayEquals(expected, ptr.get());
}
Also used: ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), ArrayAppendFunction (org.apache.phoenix.expression.function.ArrayAppendFunction), Test (org.junit.Test)
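
In this test, and the two that follow, the ImmutableBytesWritable acts as Phoenix's standard out-parameter: evaluate() points it at the serialized result rather than returning a fresh byte array. A minimal sketch of the same pattern with a scalar literal, round-tripping through PVarchar instead of asserting raw bytes (class and variable names are illustrative):

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.LiteralExpression;
import org.apache.phoenix.schema.types.PVarchar;

public class EvaluateIntoPtrSketch {
    public static void main(String[] args) throws Exception {
        Expression literal = LiteralExpression.newConstant("hello", PVarchar.INSTANCE);
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        // A constant needs no input tuple, so null is passed; evaluate()
        // returns true and fills ptr on success.
        boolean evaluated = literal.evaluate(null, ptr);
        Object value = PVarchar.INSTANCE.toObject(ptr.get(), ptr.getOffset(), ptr.getLength());
        System.out.println(evaluated + " -> " + value); // expected: true -> hello
    }
}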

Example 44 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.

Class ArrayAppendFunctionTest, method testForCorrectSeparatorBytes2.

@Test
public void testForCorrectSeparatorBytes2() throws Exception {
    Object[] o = new Object[] { "a", "b", "c" };
    Object element = "d";
    PDataType baseType = PVarchar.INSTANCE;
    PhoenixArray arr = new PhoenixArray(baseType, o);
    LiteralExpression arrayLiteral, elementLiteral;
    arrayLiteral = LiteralExpression.newConstant(arr, PVarcharArray.INSTANCE, null, null, SortOrder.DESC, Determinism.ALWAYS);
    elementLiteral = LiteralExpression.newConstant(element, baseType, null, null, SortOrder.ASC, Determinism.ALWAYS);
    List<Expression> expressions = Lists.newArrayList((Expression) arrayLiteral);
    expressions.add(elementLiteral);
    Expression arrayAppendFunction = new ArrayAppendFunction(expressions);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    arrayAppendFunction.evaluate(null, ptr);
    byte[] expected = new byte[] { -98, -1, -99, -1, -100, -1, -101, -1, -1, -1, -128, 1, -128, 3, -128, 5, -128, 7, 0, 0, 0, 10, 0, 0, 0, 4, 1 };
    assertArrayEquals(expected, ptr.get());
}
Also used: ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), ArrayAppendFunction (org.apache.phoenix.expression.function.ArrayAppendFunction), Test (org.junit.Test)

Example 45 with ImmutableBytesWritable

Use of org.apache.hadoop.hbase.io.ImmutableBytesWritable in project phoenix by apache.

Class ArrayConcatFunctionTest, method testForCorrectSeparatorBytes4.

@Test
public void testForCorrectSeparatorBytes4() throws Exception {
    Object[] o1 = new Object[] { "a", "b", null };
    Object[] o2 = new Object[] { null, "c", "d", "e" };
    PDataType type = PVarcharArray.INSTANCE;
    PDataType base = PVarchar.INSTANCE;
    PhoenixArray arr1 = new PhoenixArray(base, o1);
    PhoenixArray arr2 = new PhoenixArray(base, o2);
    LiteralExpression array1Literal, array2Literal;
    array1Literal = LiteralExpression.newConstant(arr1, type, null, null, SortOrder.ASC, Determinism.ALWAYS);
    array2Literal = LiteralExpression.newConstant(arr2, type, null, null, SortOrder.DESC, Determinism.ALWAYS);
    List<Expression> expressions = Lists.newArrayList((Expression) array1Literal);
    expressions.add(array2Literal);
    Expression arrayConcatFunction = new ArrayConcatFunction(expressions);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    arrayConcatFunction.evaluate(null, ptr);
    byte[] expected = new byte[] { 97, 0, 98, 0, 0, -2, 99, 0, 100, 0, 101, 0, 0, 0, -128, 1, -128, 3, -128, 5, -128, 5, -128, 7, -128, 9, -128, 11, 0, 0, 0, 14, 0, 0, 0, 7, 1 };
    assertArrayEquals(expected, ptr.get());
}
Also used: ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable), ArrayConcatFunction (org.apache.phoenix.expression.function.ArrayConcatFunction), Test (org.junit.Test)
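
The expected arrays in these tests pin Phoenix's exact serialized form, separator bytes included. A looser check decodes the pointer back into a PhoenixArray; a hedged sketch, assuming the result is serialized in ascending sort order (toObject's default), which the leading 97 ('a') in the expected bytes suggests here:

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.schema.types.PVarcharArray;
import org.apache.phoenix.schema.types.PhoenixArray;

public class DecodeArrayResultSketch {
    // Decode a serialized varchar array out of an evaluated pointer.
    static PhoenixArray decode(ImmutableBytesWritable ptr) {
        return (PhoenixArray) PVarcharArray.INSTANCE.toObject(
                ptr.get(), ptr.getOffset(), ptr.getLength());
    }
}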

Aggregations

ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 264
Test (org.junit.Test): 80
Expression (org.apache.phoenix.expression.Expression): 32
IOException (java.io.IOException): 26
PSmallint (org.apache.phoenix.schema.types.PSmallint): 25
Result (org.apache.hadoop.hbase.client.Result): 24
PTable (org.apache.phoenix.schema.PTable): 24
ArrayList (java.util.ArrayList): 23
Cell (org.apache.hadoop.hbase.Cell): 23
KeyValue (org.apache.hadoop.hbase.KeyValue): 23
LiteralExpression (org.apache.phoenix.expression.LiteralExpression): 23
PTinyint (org.apache.phoenix.schema.types.PTinyint): 23
PhoenixArray (org.apache.phoenix.schema.types.PhoenixArray): 23
Configuration (org.apache.hadoop.conf.Configuration): 20
PDataType (org.apache.phoenix.schema.types.PDataType): 20
PUnsignedSmallint (org.apache.phoenix.schema.types.PUnsignedSmallint): 20
PUnsignedTinyint (org.apache.phoenix.schema.types.PUnsignedTinyint): 20
List (java.util.List): 19
Put (org.apache.hadoop.hbase.client.Put): 19
SQLException (java.sql.SQLException): 18