Search in sources :

Example 76 with PColumn

use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

In the class SchemaUtil, the method processSplit:

// Go through each slot in the schema and try to match it with the split byte array. If the split
// does not conform to the schema, extend its length to match the schema.
//
// Walks the PK columns in order while tracking the current byte offset into the split key.
// Fixed-width slots consume SchemaUtil.getFixedByteSize(column) bytes; variable-width slots
// are scanned forward to their SEPARATOR_BYTE terminator. Whenever the split ends in the
// middle of a slot (or a non-terminal var-length slot is missing its separator), the number
// of missing bytes for the remaining slots is estimated and the split is padded out to that
// length via ByteUtil.fillKey. Returns the original array unchanged when it already conforms.
private static byte[] processSplit(byte[] split, LinkedHashSet<PColumn> pkColumns) {
    // pos = index of the current PK slot; offset = current byte position within split.
    int pos = 0, offset = 0, maxOffset = split.length;
    Iterator<PColumn> iterator = pkColumns.iterator();
    while (pos < pkColumns.size()) {
        PColumn column = iterator.next();
        if (column.getDataType().isFixedWidth()) {
            // Fixed width
            int length = SchemaUtil.getFixedByteSize(column);
            if (maxOffset - offset < length) {
                // The split truncates the field. Fill in the rest of the part and any fields that
                // are missing after this field.
                int fillInLength = length - (maxOffset - offset);
                // NOTE: estimatePartLength continues consuming the SAME iterator, so the
                // remaining columns are accounted for exactly once. Safe only because we
                // return immediately afterwards.
                fillInLength += estimatePartLength(pos + 1, iterator);
                return ByteUtil.fillKey(split, split.length + fillInLength);
            }
            // Account for this field, move to next position;
            offset += length;
            pos++;
        } else {
            // If we are the last slot, then we are done. Nothing needs to be filled in.
            // (A trailing var-length field needs no terminating separator.)
            if (pos == pkColumns.size() - 1) {
                break;
            }
            // Scan forward to the end of this var-length value.
            while (offset < maxOffset && split[offset] != QueryConstants.SEPARATOR_BYTE) {
                offset++;
            }
            if (offset == maxOffset) {
                // The var-length field does not end with a separator and it's not the last field.
                // SEPARATOR byte for the current var-length slot.
                int fillInLength = 1;
                // estimatePartLength consumes the rest of the iterator (see note above).
                fillInLength += estimatePartLength(pos + 1, iterator);
                return ByteUtil.fillKey(split, split.length + fillInLength);
            }
            // Move to the next position;
            // skip separator;
            offset += 1;
            pos++;
        }
    }
    return split;
}
Also used : PColumn(org.apache.phoenix.schema.PColumn)

Example 77 with PColumn

use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

In the class ColumnExpressionTest, the method testSerialization:

/**
 * Round-trips a {@link KeyValueColumnExpression} for a DECIMAL(30,5) column through
 * Writable serialization and verifies that max length, scale, and data type survive.
 */
@Test
public void testSerialization() throws Exception {
    int maxLen = 30;
    int scale = 5;
    PName colName = PNameFactory.newName("c1");
    PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PDecimal.INSTANCE, maxLen, scale, true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes());
    ColumnExpression colExp = new KeyValueColumnExpression(column);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dOut = new DataOutputStream(baos);
    colExp.write(dOut);
    dOut.flush();
    // Deserialize into a fresh instance from the serialized bytes.
    // The single-arg ByteArrayInputStream ctor replaces the redundant (buf, 0, buf.length) form.
    ColumnExpression colExp2 = new KeyValueColumnExpression();
    DataInputStream dIn = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
    colExp2.readFields(dIn);
    assertEquals(maxLen, colExp2.getMaxLength().intValue());
    assertEquals(scale, colExp2.getScale().intValue());
    assertEquals(PDecimal.INSTANCE, colExp2.getDataType());
}
Also used : PColumn(org.apache.phoenix.schema.PColumn) PColumnImpl(org.apache.phoenix.schema.PColumnImpl) ByteArrayInputStream(java.io.ByteArrayInputStream) DataOutputStream(java.io.DataOutputStream) PName(org.apache.phoenix.schema.PName) ByteArrayOutputStream(java.io.ByteArrayOutputStream) DataInputStream(java.io.DataInputStream) Test(org.junit.Test)

Example 78 with PColumn

use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

In the class ColumnExpressionTest, the method testSerializationWithNullScaleAndMaxLength:

/**
 * Round-trips a {@link KeyValueColumnExpression} whose column has neither a max length
 * nor a scale, and verifies both come back {@code null} after deserialization.
 */
@Test
public void testSerializationWithNullScaleAndMaxLength() throws Exception {
    PName colName = PNameFactory.newName("c1");
    PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PDecimal.INSTANCE, null, null, true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes());
    ColumnExpression colExp = new KeyValueColumnExpression(column);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dOut = new DataOutputStream(baos);
    colExp.write(dOut);
    dOut.flush();
    // Deserialize into a fresh instance from the serialized bytes.
    // The single-arg ByteArrayInputStream ctor replaces the redundant (buf, 0, buf.length) form.
    ColumnExpression colExp2 = new KeyValueColumnExpression();
    DataInputStream dIn = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
    colExp2.readFields(dIn);
    assertNull(colExp2.getMaxLength());
    assertNull(colExp2.getScale());
}
Also used : PColumn(org.apache.phoenix.schema.PColumn) PColumnImpl(org.apache.phoenix.schema.PColumnImpl) ByteArrayInputStream(java.io.ByteArrayInputStream) DataOutputStream(java.io.DataOutputStream) PName(org.apache.phoenix.schema.PName) ByteArrayOutputStream(java.io.ByteArrayOutputStream) DataInputStream(java.io.DataInputStream) Test(org.junit.Test)

Example 79 with PColumn

use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

In the class ColumnExpressionTest, the method testSerializationWithNullMaxLength:

/**
 * Round-trips a {@link KeyValueColumnExpression} for a VARCHAR column with a scale but
 * no max length, and verifies the null max length, scale, and type survive serialization.
 */
@Test
public void testSerializationWithNullMaxLength() throws Exception {
    int scale = 5;
    PName colName = PNameFactory.newName("c1");
    PColumn column = new PColumnImpl(colName, PNameFactory.newName("f1"), PVarchar.INSTANCE, null, scale, true, 20, SortOrder.getDefault(), 0, null, false, null, false, false, colName.getBytes());
    ColumnExpression colExp = new KeyValueColumnExpression(column);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dOut = new DataOutputStream(baos);
    colExp.write(dOut);
    dOut.flush();
    // Deserialize into a fresh instance from the serialized bytes.
    // The single-arg ByteArrayInputStream ctor replaces the redundant (buf, 0, buf.length) form.
    ColumnExpression colExp2 = new KeyValueColumnExpression();
    DataInputStream dIn = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
    colExp2.readFields(dIn);
    assertNull(colExp2.getMaxLength());
    assertEquals(scale, colExp2.getScale().intValue());
    assertEquals(PVarchar.INSTANCE, colExp2.getDataType());
}
Also used : PColumn(org.apache.phoenix.schema.PColumn) PColumnImpl(org.apache.phoenix.schema.PColumnImpl) ByteArrayInputStream(java.io.ByteArrayInputStream) DataOutputStream(java.io.DataOutputStream) PName(org.apache.phoenix.schema.PName) ByteArrayOutputStream(java.io.ByteArrayOutputStream) DataInputStream(java.io.DataInputStream) Test(org.junit.Test)

Example 80 with PColumn

use of org.apache.phoenix.schema.PColumn in project phoenix by apache.

In the class IndexTestUtil, the method generateIndexData:

/**
 * Translates a mutation against a data table into the corresponding mutations against its
 * index table.
 *
 * First decodes the data row key slot-by-slot via the RowKeySchema (skipping the leading
 * salt byte when the data table is salted), coercing each PK value into its index-column
 * form. Then, unless the mutation is a row-level Delete with no cells, walks every Cell of
 * the mutation, coercing covered column values as well. Finally builds the index row key
 * with newKey and emits either a delete or a put row with the covered values set.
 *
 * @param dataMutation mutation (Put or Delete) applied to the data table
 * @param ptr scratch buffer, repeatedly overwritten during decoding/coercion
 * @return the resulting index-table mutations
 */
public static List<Mutation> generateIndexData(PTable indexTable, PTable dataTable, Mutation dataMutation, ImmutableBytesWritable ptr, KeyValueBuilder builder) throws SQLException {
    byte[] dataRowKey = dataMutation.getRow();
    RowKeySchema dataRowKeySchema = dataTable.getRowKeySchema();
    List<PColumn> dataPKColumns = dataTable.getPKColumns();
    int i = 0;
    int indexOffset = 0;
    // Boxed on purpose: RowKeySchema.next returns null when the key is exhausted.
    Boolean hasValue;
    // Skip salt column
    int maxOffset = dataRowKey.length;
    // If the data table is salted, start iterating at slot 1 so the salt byte is skipped.
    dataRowKeySchema.iterator(dataRowKey, ptr, dataTable.getBucketNum() == null ? i : ++i);
    List<PColumn> indexPKColumns = indexTable.getPKColumns();
    List<PColumn> indexColumns = indexTable.getColumns();
    int nIndexColumns = indexPKColumns.size();
    int maxIndexValues = indexColumns.size() - nIndexColumns - indexOffset;
    // Tracks which covered (non-PK) index columns received a value from the mutation.
    BitSet indexValuesSet = new BitSet(maxIndexValues);
    byte[][] indexValues = new byte[indexColumns.size() - indexOffset][];
    // Decode each data PK slot and store its coerced bytes at the index column's position.
    while ((hasValue = dataRowKeySchema.next(ptr, i, maxOffset)) != null) {
        if (hasValue) {
            PColumn dataColumn = dataPKColumns.get(i);
            PColumn indexColumn = indexTable.getColumnForColumnName(IndexUtil.getIndexColumnName(dataColumn));
            coerceDataValueToIndexValue(dataColumn, indexColumn, ptr);
            indexValues[indexColumn.getPosition() - indexOffset] = ptr.copyBytes();
        }
        i++;
    }
    PRow row;
    long ts = MetaDataUtil.getClientTimeStamp(dataMutation);
    if (dataMutation instanceof Delete && dataMutation.getFamilyCellMap().values().isEmpty()) {
        // A full-row delete: only the index row key is needed; emit a delete.
        indexTable.newKey(ptr, indexValues);
        row = indexTable.newRow(builder, ts, ptr, false);
        row.delete();
    } else {
        // If no column families in table, then nothing to look for
        if (!dataTable.getColumnFamilies().isEmpty()) {
            for (Map.Entry<byte[], List<Cell>> entry : dataMutation.getFamilyCellMap().entrySet()) {
                PColumnFamily family = dataTable.getColumnFamily(entry.getKey());
                for (Cell kv : entry.getValue()) {
                    @SuppressWarnings("deprecation") byte[] cq = kv.getQualifier();
                    byte[] emptyKVQualifier = EncodedColumnsUtil.getEmptyKeyValueInfo(dataTable).getFirst();
                    // Skip the empty key value marker; it carries no user data.
                    if (Bytes.compareTo(emptyKVQualifier, cq) != 0) {
                        try {
                            PColumn dataColumn = family.getPColumnForColumnQualifier(cq);
                            PColumn indexColumn = indexTable.getColumnForColumnName(IndexUtil.getIndexColumnName(family.getName().getString(), dataColumn.getName().getString()));
                            ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength());
                            coerceDataValueToIndexValue(dataColumn, indexColumn, ptr);
                            // NOTE(review): indexOf against indexPKColumns returns -1 for a
                            // covered (non-PK) index column, which would index indexValues
                            // out of bounds — presumably every column reached here is an
                            // index PK column; confirm against the index schema.
                            indexValues[indexPKColumns.indexOf(indexColumn) - indexOffset] = ptr.copyBytes();
                            if (!SchemaUtil.isPKColumn(indexColumn)) {
                                // Remember that this covered column has a value to write.
                                indexValuesSet.set(indexColumn.getPosition() - nIndexColumns - indexOffset);
                            }
                        } catch (ColumnNotFoundException e) {
                        // Ignore as this means that the data column isn't in the index
                        }
                    }
                }
            }
        }
        indexTable.newKey(ptr, indexValues);
        row = indexTable.newRow(builder, ts, ptr, false);
        // Write the values for every covered column that was flagged above.
        int pos = 0;
        while ((pos = indexValuesSet.nextSetBit(pos)) >= 0) {
            int index = nIndexColumns + indexOffset + pos++;
            PColumn indexColumn = indexColumns.get(index);
            row.setValue(indexColumn, indexValues[index]);
        }
    }
    return row.toRowMutations();
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) BitSet(java.util.BitSet) RowKeySchema(org.apache.phoenix.schema.RowKeySchema) PColumnFamily(org.apache.phoenix.schema.PColumnFamily) PRow(org.apache.phoenix.schema.PRow) PColumn(org.apache.phoenix.schema.PColumn) ColumnNotFoundException(org.apache.phoenix.schema.ColumnNotFoundException) List(java.util.List) Map(java.util.Map) Cell(org.apache.hadoop.hbase.Cell)

Aggregations

PColumn (org.apache.phoenix.schema.PColumn)101 PTable (org.apache.phoenix.schema.PTable)59 PhoenixConnection (org.apache.phoenix.jdbc.PhoenixConnection)26 Expression (org.apache.phoenix.expression.Expression)21 TableRef (org.apache.phoenix.schema.TableRef)20 ArrayList (java.util.ArrayList)19 PName (org.apache.phoenix.schema.PName)18 ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable)17 LiteralExpression (org.apache.phoenix.expression.LiteralExpression)17 ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr)17 ColumnRef (org.apache.phoenix.schema.ColumnRef)17 Hint (org.apache.phoenix.parse.HintNode.Hint)14 PTableKey (org.apache.phoenix.schema.PTableKey)14 ColumnNotFoundException (org.apache.phoenix.schema.ColumnNotFoundException)13 PColumnFamily (org.apache.phoenix.schema.PColumnFamily)13 PSmallint (org.apache.phoenix.schema.types.PSmallint)13 SQLException (java.sql.SQLException)12 ProjectedColumnExpression (org.apache.phoenix.expression.ProjectedColumnExpression)12 PColumnImpl (org.apache.phoenix.schema.PColumnImpl)12 Map (java.util.Map)11