Use of org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder in project phoenix by apache.
From the class TenantIdByteConversionTest, method getDataSchema:
public static RowKeySchema getDataSchema(final PDataType data, final SortOrder sortOrder) {
    RowKeySchemaBuilder builder = new RowKeySchemaBuilder(3);
    // First field: the data type under test, fixed at max length 1.
    builder.addField(new PDatum() {
        @Override
        public boolean isNullable() {
            return false;
        }
        @Override
        public PDataType getDataType() {
            return data;
        }
        @Override
        public Integer getMaxLength() {
            return 1;
        }
        @Override
        public Integer getScale() {
            return null;
        }
        @Override
        public SortOrder getSortOrder() {
            return sortOrder;
        }
    }, false, sortOrder);
    // Second field: a non-nullable unsigned int column.
    builder.addField(new PDatum() {
        @Override
        public boolean isNullable() {
            return false;
        }
        @Override
        public PDataType getDataType() {
            return PUnsignedInt.INSTANCE;
        }
        @Override
        public Integer getMaxLength() {
            return 3;
        }
        @Override
        public Integer getScale() {
            return null;
        }
        @Override
        public SortOrder getSortOrder() {
            return sortOrder;
        }
    }, false, sortOrder);
    // Third field: a nullable VARCHAR column.
    builder.addField(new PDatum() {
        @Override
        public boolean isNullable() {
            return true;
        }
        @Override
        public PDataType getDataType() {
            return PVarchar.INSTANCE;
        }
        @Override
        public Integer getMaxLength() {
            return 3;
        }
        @Override
        public Integer getScale() {
            return null;
        }
        @Override
        public SortOrder getSortOrder() {
            return sortOrder;
        }
    }, false, sortOrder);
    return builder.build();
}
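A minimal sketch of how the schema built above might be consumed, assuming getDataSchema is in scope and using PLong.INSTANCE as one example data type (any PDataType would do); getFieldCount() and getField(int) are the standard ValueSchema accessors:

// Hedged usage sketch: build the three-field schema and print its layout.
RowKeySchema schema = getDataSchema(PLong.INSTANCE, SortOrder.ASC);
for (int i = 0; i < schema.getFieldCount(); i++) {
    ValueSchema.Field field = schema.getField(i);
    System.out.println(i + ": " + field.getDataType().getSqlTypeName()
            + ", maxLength=" + field.getMaxLength()
            + ", sortOrder=" + field.getSortOrder());
}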
Use of org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder in project phoenix by apache.
From the class ScanRangesTest, method foreach:
private static Collection<?> foreach(KeyRange[][] ranges, int[] widths, KeyRange keyRange, boolean expectedResult) {
    List<List<KeyRange>> slots = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST);
    RowKeySchemaBuilder builder = new RowKeySchemaBuilder(10);
    for (final int width : widths) {
        if (width > 0) {
            // Positive width: a fixed-width CHAR field of that length.
            builder.addField(new PDatum() {
                @Override
                public boolean isNullable() {
                    return false;
                }
                @Override
                public PDataType getDataType() {
                    return PChar.INSTANCE;
                }
                @Override
                public Integer getMaxLength() {
                    return width;
                }
                @Override
                public Integer getScale() {
                    return null;
                }
                @Override
                public SortOrder getSortOrder() {
                    return SortOrder.getDefault();
                }
            }, false, SortOrder.getDefault());
        } else {
            // Non-positive width: a variable-width VARCHAR field.
            builder.addField(new PDatum() {
                @Override
                public boolean isNullable() {
                    return false;
                }
                @Override
                public PDataType getDataType() {
                    return PVarchar.INSTANCE;
                }
                @Override
                public Integer getMaxLength() {
                    return width;
                }
                @Override
                public Integer getScale() {
                    return null;
                }
                @Override
                public SortOrder getSortOrder() {
                    return SortOrder.getDefault();
                }
            }, false, SortOrder.getDefault());
        }
    }
    ScanRanges scanRanges = ScanRanges.createSingleSpan(builder.build(), slots);
    return foreach(scanRanges, widths, keyRange, expectedResult);
}
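The two branches above differ only in the data type returned, so the anonymous class could be factored into a small helper. A hedged sketch; testPDatum is a hypothetical name, not part of the Phoenix test:

// Hypothetical helper: one PDatum for either a fixed-width CHAR
// (width > 0) or a variable-width VARCHAR (width <= 0) test field.
private static PDatum testPDatum(final int width) {
    return new PDatum() {
        @Override public boolean isNullable() { return false; }
        @Override public PDataType getDataType() {
            return width > 0 ? PChar.INSTANCE : PVarchar.INSTANCE;
        }
        @Override public Integer getMaxLength() { return width; }
        @Override public Integer getScale() { return null; }
        @Override public SortOrder getSortOrder() { return SortOrder.getDefault(); }
    };
}

With it, the loop body collapses to builder.addField(testPDatum(width), false, SortOrder.getDefault());.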
Use of org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder in project phoenix by apache.
From the class PTableImpl, method init:
private void init(PName tenantId, PName schemaName, PName tableName, PTableType type,
        PIndexState state, long timeStamp, long sequenceNumber, PName pkName, Integer bucketNum,
        Collection<PColumn> columns, PName parentSchemaName, PName parentTableName,
        List<PTable> indexes, boolean isImmutableRows, List<PName> physicalNames,
        PName defaultFamilyName, String viewExpression, boolean disableWAL, boolean multiTenant,
        boolean storeNulls, ViewType viewType, Short viewIndexId, IndexType indexType,
        int baseColumnCount, boolean rowKeyOrderOptimizable, boolean isTransactional,
        long updateCacheFrequency, long indexDisableTimestamp, boolean isNamespaceMapped,
        String autoPartitionSeqName, boolean isAppendOnlySchema, ImmutableStorageScheme storageScheme,
        QualifierEncodingScheme qualifierEncodingScheme, EncodedCQCounter encodedCQCounter,
        boolean useStatsForParallelization) throws SQLException {
    Preconditions.checkNotNull(schemaName);
    // tenantId should be null or not empty
    Preconditions.checkArgument(tenantId == null || tenantId.getBytes().length > 0);
    int estimatedSize = SizedUtil.OBJECT_SIZE * 2 + 23 * SizedUtil.POINTER_SIZE
            + 4 * SizedUtil.INT_SIZE + 2 * SizedUtil.LONG_SIZE + 2 * SizedUtil.INT_OBJECT_SIZE
            + PNameFactory.getEstimatedSize(tenantId) + PNameFactory.getEstimatedSize(schemaName)
            + PNameFactory.getEstimatedSize(tableName) + PNameFactory.getEstimatedSize(pkName)
            + PNameFactory.getEstimatedSize(parentTableName)
            + PNameFactory.getEstimatedSize(defaultFamilyName);
    this.tenantId = tenantId;
    this.schemaName = schemaName;
    this.tableName = tableName;
    this.name = PNameFactory.newName(SchemaUtil.getTableName(schemaName.getString(), tableName.getString()));
    this.key = new PTableKey(tenantId, name.getString());
    this.type = type;
    this.state = state;
    this.timeStamp = timeStamp;
    this.indexDisableTimestamp = indexDisableTimestamp;
    this.sequenceNumber = sequenceNumber;
    this.pkName = pkName;
    this.isImmutableRows = isImmutableRows;
    this.defaultFamilyName = defaultFamilyName;
    this.viewStatement = viewExpression;
    this.disableWAL = disableWAL;
    this.multiTenant = multiTenant;
    this.storeNulls = storeNulls;
    this.viewType = viewType;
    this.viewIndexId = viewIndexId;
    this.indexType = indexType;
    this.isTransactional = isTransactional;
    this.rowKeyOrderOptimizable = rowKeyOrderOptimizable;
    this.updateCacheFrequency = updateCacheFrequency;
    this.isNamespaceMapped = isNamespaceMapped;
    this.autoPartitionSeqName = autoPartitionSeqName;
    this.isAppendOnlySchema = isAppendOnlySchema;
    // Null check for backward compatibility and sanity. If either of the two below is null,
    // the table is a non-encoded table.
    this.immutableStorageScheme = storageScheme == null ? ImmutableStorageScheme.ONE_CELL_PER_COLUMN : storageScheme;
    this.qualifierEncodingScheme = qualifierEncodingScheme == null ? QualifierEncodingScheme.NON_ENCODED_QUALIFIERS : qualifierEncodingScheme;
    List<PColumn> pkColumns;
    PColumn[] allColumns;
    this.columnsByName = ArrayListMultimap.create(columns.size(), 1);
    this.kvColumnsByQualifiers = Maps.newHashMapWithExpectedSize(columns.size());
    int numPKColumns = 0;
    if (bucketNum != null) {
        // Add salt column to allColumns and pkColumns, but don't add to
        // columnsByName, since it should not be addressable via name.
        allColumns = new PColumn[columns.size() + 1];
        allColumns[SALTING_COLUMN.getPosition()] = SALTING_COLUMN;
        pkColumns = Lists.newArrayListWithExpectedSize(columns.size() + 1);
        ++numPKColumns;
    } else {
        allColumns = new PColumn[columns.size()];
        pkColumns = Lists.newArrayListWithExpectedSize(columns.size());
    }
    for (PColumn column : columns) {
        allColumns[column.getPosition()] = column;
        PName familyName = column.getFamilyName();
        if (familyName == null) {
            ++numPKColumns;
        }
        String columnName = column.getName().getString();
        if (columnsByName.put(columnName, column)) {
            int count = 0;
            for (PColumn dupColumn : columnsByName.get(columnName)) {
                if (Objects.equal(familyName, dupColumn.getFamilyName())) {
                    count++;
                    if (count > 1) {
                        throw new ColumnAlreadyExistsException(schemaName.getString(), name.getString(), columnName);
                    }
                }
            }
        }
        byte[] cq = column.getColumnQualifierBytes();
        String cf = column.getFamilyName() != null ? column.getFamilyName().getString() : null;
        if (cf != null && cq != null) {
            KVColumnFamilyQualifier info = new KVColumnFamilyQualifier(cf, cq);
            if (kvColumnsByQualifiers.get(info) != null) {
                throw new ColumnAlreadyExistsException(schemaName.getString(), name.getString(), columnName);
            }
            kvColumnsByQualifiers.put(info, column);
        }
    }
    // for multi-map
    estimatedSize += SizedUtil.sizeOfMap(allColumns.length, SizedUtil.POINTER_SIZE, SizedUtil.sizeOfArrayList(1));
    this.bucketNum = bucketNum;
    this.allColumns = ImmutableList.copyOf(allColumns);
    estimatedSize += SizedUtil.sizeOfMap(numPKColumns) + SizedUtil.sizeOfMap(allColumns.length);
    RowKeySchemaBuilder builder = new RowKeySchemaBuilder(numPKColumns);
    // Two pass so that column order in column families matches overall column order
    // and to ensure that column family order is constant
    int maxExpectedSize = allColumns.length - numPKColumns;
    // Maintain iteration order so that column families are ordered as they are listed
    Map<PName, List<PColumn>> familyMap = Maps.newLinkedHashMap();
    PColumn rowTimestampCol = null;
    for (PColumn column : allColumns) {
        PName familyName = column.getFamilyName();
        if (familyName == null) {
            hasColumnsRequiringUpgrade |= (column.getSortOrder() == SortOrder.DESC
                    && (!column.getDataType().isFixedWidth()
                            || column.getDataType() == PChar.INSTANCE
                            || column.getDataType() == PFloat.INSTANCE
                            || column.getDataType() == PDouble.INSTANCE
                            || column.getDataType() == PBinary.INSTANCE))
                    || (column.getSortOrder() == SortOrder.ASC
                            && column.getDataType() == PBinary.INSTANCE
                            && column.getMaxLength() != null
                            && column.getMaxLength() > 1);
            pkColumns.add(column);
            if (column.isRowTimestamp()) {
                rowTimestampCol = column;
            }
        }
        if (familyName == null) {
            // PK columns
            estimatedSize += column.getEstimatedSize();
            builder.addField(column, column.isNullable(), column.getSortOrder());
        } else {
            List<PColumn> columnsInFamily = familyMap.get(familyName);
            if (columnsInFamily == null) {
                columnsInFamily = Lists.newArrayListWithExpectedSize(maxExpectedSize);
                familyMap.put(familyName, columnsInFamily);
            }
            columnsInFamily.add(column);
        }
    }
    this.pkColumns = ImmutableList.copyOf(pkColumns);
    if (rowTimestampCol != null) {
        this.rowTimestampColPos = this.pkColumns.indexOf(rowTimestampCol);
    } else {
        this.rowTimestampColPos = -1;
    }
    // after hasDescVarLengthColumns is calculated
    builder.rowKeyOrderOptimizable(this.rowKeyOrderOptimizable());
    this.rowKeySchema = builder.build();
    estimatedSize += rowKeySchema.getEstimatedSize();
    Iterator<Map.Entry<PName, List<PColumn>>> iterator = familyMap.entrySet().iterator();
    PColumnFamily[] families = new PColumnFamily[familyMap.size()];
    ImmutableMap.Builder<String, PColumnFamily> familyByString = ImmutableMap.builder();
    ImmutableSortedMap.Builder<byte[], PColumnFamily> familyByBytes = ImmutableSortedMap.orderedBy(Bytes.BYTES_COMPARATOR);
    for (int i = 0; i < families.length; i++) {
        Map.Entry<PName, List<PColumn>> entry = iterator.next();
        PColumnFamily family = new PColumnFamilyImpl(entry.getKey(), entry.getValue());
        families[i] = family;
        familyByString.put(family.getName().getString(), family);
        familyByBytes.put(family.getName().getBytes(), family);
        estimatedSize += family.getEstimatedSize();
    }
    this.families = ImmutableList.copyOf(families);
    this.familyByBytes = familyByBytes.build();
    this.familyByString = familyByString.build();
    estimatedSize += SizedUtil.sizeOfArrayList(families.length);
    estimatedSize += SizedUtil.sizeOfMap(families.length) * 2;
    this.indexes = indexes == null ? Collections.<PTable>emptyList() : indexes;
    for (PTable index : this.indexes) {
        estimatedSize += index.getEstimatedSize();
    }
    this.parentSchemaName = parentSchemaName;
    this.parentTableName = parentTableName;
    this.parentName = parentTableName == null ? null
            : PNameFactory.newName(SchemaUtil.getTableName(
                    parentSchemaName != null ? parentSchemaName.getString() : null,
                    parentTableName.getString()));
    estimatedSize += PNameFactory.getEstimatedSize(this.parentName);
    this.physicalNames = physicalNames == null ? ImmutableList.<PName>of() : ImmutableList.copyOf(physicalNames);
    for (PName name : this.physicalNames) {
        estimatedSize += name.getEstimatedSize();
    }
    this.estimatedSize = estimatedSize;
    this.baseColumnCount = baseColumnCount;
    this.encodedCQCounter = encodedCQCounter;
    this.useStatsForParallelization = useStatsForParallelization;
}
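Stripped of the size accounting and column-family bookkeeping, the row-key construction above reduces to a simple pattern. A minimal sketch, assuming pkColumns is already populated in primary-key order:

// Core pattern: one schema field per PK column, added in PK order.
RowKeySchemaBuilder builder = new RowKeySchemaBuilder(pkColumns.size());
for (PColumn column : pkColumns) {
    // PColumn implements PDatum, so it can be passed to addField directly.
    builder.addField(column, column.isNullable(), column.getSortOrder());
}
// Set before build(), once any DESC/variable-length analysis is done.
builder.rowKeyOrderOptimizable(rowKeyOrderOptimizable);
RowKeySchema rowKeySchema = builder.build();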
Use of org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder in project phoenix by apache.
From the class SkipScanFilterIntersectTest, method foreach:
private static Collection<?> foreach(KeyRange[][] ranges, int[] widths, byte[] lowerInclusive, byte[] upperExclusive, KeyRange[][] expectedRanges) {
    List<List<KeyRange>> slots = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST);
    List<List<KeyRange>> expectedSlots = expectedRanges == null ? null : Lists.transform(Lists.newArrayList(expectedRanges), ARRAY_TO_LIST);
    RowKeySchemaBuilder builder = new RowKeySchemaBuilder(10);
    for (final int width : widths) {
        // A non-positive width denotes a nullable, variable-width VARCHAR field;
        // a positive width denotes a fixed-width CHAR field of that length.
        builder.addField(new PDatum() {
            @Override
            public boolean isNullable() {
                return width <= 0;
            }
            @Override
            public PDataType getDataType() {
                return width <= 0 ? PVarchar.INSTANCE : PChar.INSTANCE;
            }
            @Override
            public Integer getMaxLength() {
                return width <= 0 ? null : width;
            }
            @Override
            public Integer getScale() {
                return null;
            }
            @Override
            public SortOrder getSortOrder() {
                return SortOrder.getDefault();
            }
        }, width <= 0, SortOrder.getDefault());
    }
    List<Object> ret = Lists.newArrayList();
    ret.add(new Object[] { slots, builder.build(), lowerInclusive, upperExclusive, expectedSlots });
    return ret;
}
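The Object[] packed into ret has the shape a JUnit @Parameters factory returns; a hedged sketch of a constructor that would receive such a row (the field names here are assumptions, not necessarily the test's actual fields):

// Hypothetical constructor receiving one parameter row:
// { slots, schema, lowerInclusive, upperExclusive, expectedSlots }
public SkipScanFilterIntersectTest(List<List<KeyRange>> slots, RowKeySchema schema,
        byte[] lowerInclusive, byte[] upperExclusive, List<List<KeyRange>> expectedSlots) {
    this.slots = slots;
    this.schema = schema;
    this.lowerInclusive = lowerInclusive;
    this.upperExclusive = upperExclusive;
    this.expectedSlots = expectedSlots;
}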
Use of org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder in project phoenix by apache.
From the class SaltedScanRangesTest, method foreach:
private static Collection<?> foreach(KeyRange[][] ranges, int[] widths, KeyRange keyRange, boolean useSkipScan, boolean expectedResult) {
    List<List<KeyRange>> slots = Lists.transform(Lists.newArrayList(ranges), ARRAY_TO_LIST);
    slots = new ArrayList<>(slots);
    // Prepend a single-byte slot for the salt bucket at position 0.
    slots.add(0, Collections.singletonList(KeyRange.getKeyRange(new byte[] { 0 })));
    RowKeySchemaBuilder builder = new RowKeySchemaBuilder(10);
    builder.addField(SaltingUtil.SALTING_COLUMN, false, SortOrder.getDefault());
    for (final int width : widths) {
        if (width > 0) {
            // Positive width: a fixed-width CHAR field of that length.
            builder.addField(new PDatum() {
                @Override
                public boolean isNullable() {
                    return false;
                }
                @Override
                public PDataType getDataType() {
                    return PChar.INSTANCE;
                }
                @Override
                public Integer getMaxLength() {
                    return width;
                }
                @Override
                public Integer getScale() {
                    return null;
                }
                @Override
                public SortOrder getSortOrder() {
                    return SortOrder.getDefault();
                }
            }, false, SortOrder.getDefault());
        } else {
            // Non-positive width: a variable-width VARCHAR field.
            builder.addField(new PDatum() {
                @Override
                public boolean isNullable() {
                    return false;
                }
                @Override
                public PDataType getDataType() {
                    return PVarchar.INSTANCE;
                }
                @Override
                public Integer getMaxLength() {
                    return width;
                }
                @Override
                public Integer getScale() {
                    return null;
                }
                @Override
                public SortOrder getSortOrder() {
                    return SortOrder.getDefault();
                }
            }, false, SortOrder.getDefault());
        }
    }
    ScanRanges scanRanges = ScanRanges.createSingleSpan(builder.build(), slots, nBuckets, useSkipScan);
    return foreach(scanRanges, widths, keyRange, expectedResult);
}
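For context, the leading salt byte that slot 0 stands in for is derived from the remaining row key bytes. A minimal sketch using SaltingUtil.getSaltingByte, assuming an example key and a bucket count of 4:

// Hedged sketch: compute the salt byte Phoenix would prepend to a row key.
byte[] rowKey = Bytes.toBytes("example_key"); // example key bytes (assumption)
int saltBuckets = 4;                          // assumed SALT_BUCKETS setting
byte saltByte = SaltingUtil.getSaltingByte(rowKey, 0, rowKey.length, saltBuckets);
// The salted row key is the salt byte followed by the original key bytes.
byte[] saltedKey = new byte[rowKey.length + 1];
saltedKey[0] = saltByte;
System.arraycopy(rowKey, 0, saltedKey, 1, rowKey.length);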