Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.
The class MutationState, method generateMutations.
private void generateMutations(final TableRef tableRef, long timestamp, final Map<ImmutableBytesPtr, RowMutationState> values, final List<Mutation> mutationList, final List<Mutation> mutationsPertainingToIndex) {
    final PTable table = tableRef.getTable();
    boolean tableWithRowTimestampCol = table.getRowTimestampColPos() != -1;
    Iterator<Map.Entry<ImmutableBytesPtr, RowMutationState>> iterator = values.entrySet().iterator();
    long timestampToUse = timestamp;
    Map<ImmutableBytesPtr, RowMutationState> modifiedValues = Maps.newHashMap();
    while (iterator.hasNext()) {
        Map.Entry<ImmutableBytesPtr, RowMutationState> rowEntry = iterator.next();
        byte[] onDupKeyBytes = rowEntry.getValue().getOnDupKeyBytes();
        boolean hasOnDupKey = onDupKeyBytes != null;
        ImmutableBytesPtr key = rowEntry.getKey();
        RowMutationState state = rowEntry.getValue();
        if (tableWithRowTimestampCol) {
            RowTimestampColInfo rowTsColInfo = state.getRowTimestampColInfo();
            if (rowTsColInfo.useServerTimestamp()) {
                // since we are about to modify the byte[] stored in key (which changes its hashcode)
                // we need to remove the entry from the values map and add a new entry with the modified byte[]
                modifiedValues.put(key, state);
                iterator.remove();
                // regenerate the key with this timestamp.
                key = getNewRowKeyWithRowTimestamp(key, timestampToUse, table);
            } else {
                if (rowTsColInfo.getTimestamp() != null) {
                    timestampToUse = rowTsColInfo.getTimestamp();
                }
            }
        }
        PRow row = tableRef.getTable().newRow(connection.getKeyValueBuilder(), timestampToUse, key, hasOnDupKey);
        List<Mutation> rowMutations, rowMutationsPertainingToIndex;
        if (rowEntry.getValue().getColumnValues() == PRow.DELETE_MARKER) {
            // means delete
            row.delete();
            rowMutations = row.toRowMutations();
            // Row deletes for index tables are processed by running a re-written query
            // against the index table (as this allows for flexibility in being able to
            // delete rows).
            rowMutationsPertainingToIndex = Collections.emptyList();
        } else {
            for (Map.Entry<PColumn, byte[]> valueEntry : rowEntry.getValue().getColumnValues().entrySet()) {
                row.setValue(valueEntry.getKey(), valueEntry.getValue());
            }
            rowMutations = row.toRowMutations();
            // TODO: use our ServerCache
            for (Mutation mutation : rowMutations) {
                if (onDupKeyBytes != null) {
                    mutation.setAttribute(PhoenixIndexBuilder.ATOMIC_OP_ATTRIB, onDupKeyBytes);
                }
            }
            rowMutationsPertainingToIndex = rowMutations;
        }
        mutationList.addAll(rowMutations);
        if (connection.isReplayMutations()) {
            // correct index rows on replay.
            for (Mutation mutation : rowMutations) {
                mutation.setAttribute(BaseScannerRegionObserver.IGNORE_NEWER_MUTATIONS, PDataType.TRUE_BYTES);
            }
        }
        if (mutationsPertainingToIndex != null)
            mutationsPertainingToIndex.addAll(rowMutationsPertainingToIndex);
    }
    values.putAll(modifiedValues);
}
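The PColumn usage that matters here is the non-delete branch: each PColumn-to-byte[] entry from the row's column values is applied to a PRow, which then encodes the actual HBase mutations. A minimal sketch of that pattern, pulled out as a standalone helper; the method name and parameters below are illustrative, not part of Phoenix:

// Hypothetical helper mirroring the non-delete branch above: apply PColumn-keyed
// values to a new PRow and return the resulting mutations.
private List<Mutation> toRowMutations(PTable table, PhoenixConnection connection, ImmutableBytesPtr key, Map<PColumn, byte[]> columnValues) {
    PRow row = table.newRow(connection.getKeyValueBuilder(), HConstants.LATEST_TIMESTAMP, key, false);
    for (Map.Entry<PColumn, byte[]> entry : columnValues.entrySet()) {
        // each PColumn carries the column family, qualifier, and type needed to encode the cell
        row.setValue(entry.getKey(), entry.getValue());
    }
    return row.toRowMutations();
}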
Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.
The class MutationState, method joinMutationState.
private void joinMutationState(TableRef tableRef, Map<ImmutableBytesPtr, RowMutationState> srcRows, Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> dstMutations) {
    PTable table = tableRef.getTable();
    boolean isIndex = table.getType() == PTableType.INDEX;
    boolean incrementRowCount = dstMutations == this.mutations;
    Map<ImmutableBytesPtr, RowMutationState> existingRows = dstMutations.put(tableRef, srcRows);
    if (existingRows != null) {
        // Loop through new rows and replace existing with new
        for (Map.Entry<ImmutableBytesPtr, RowMutationState> rowEntry : srcRows.entrySet()) {
            // Replace existing row with new row
            RowMutationState existingRowMutationState = existingRows.put(rowEntry.getKey(), rowEntry.getValue());
            if (existingRowMutationState != null) {
                Map<PColumn, byte[]> existingValues = existingRowMutationState.getColumnValues();
                if (existingValues != PRow.DELETE_MARKER) {
                    Map<PColumn, byte[]> newRow = rowEntry.getValue().getColumnValues();
                    // if new row is PRow.DELETE_MARKER, it means delete, and we don't need to merge it with existing row.
                    if (newRow != PRow.DELETE_MARKER) {
                        // Merge existing column values with new column values
                        existingRowMutationState.join(rowEntry.getValue());
                        // Now that the existing row has been merged with the new row, replace it back
                        // again (since it was merged with the new one above).
                        existingRows.put(rowEntry.getKey(), existingRowMutationState);
                    }
                }
            } else {
                if (incrementRowCount && !isIndex) {
                    // Don't count index rows in row count
                    numRows++;
                }
            }
        }
        // Put the existing one back now that it's merged
        dstMutations.put(tableRef, existingRows);
    } else {
        // Size new map at batch size as that's what it'll likely grow to.
        Map<ImmutableBytesPtr, RowMutationState> newRows = Maps.newHashMapWithExpectedSize(connection.getMutateBatchSize());
        newRows.putAll(srcRows);
        dstMutations.put(tableRef, newRows);
        if (incrementRowCount && !isIndex) {
            numRows += srcRows.size();
        }
    }
}
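Here the PColumn map is the unit of merging: when the same row key appears in both the existing and incoming state, the two PColumn-to-value maps are joined, unless either side is the PRow.DELETE_MARKER sentinel, in which case the newer state simply replaces the older one (as the put above already did). A minimal sketch of that decision as a hypothetical helper; the name and signature are illustrative only:

// Hypothetical helper mirroring the merge decision above for one row key.
private RowMutationState mergeRowStates(RowMutationState existing, RowMutationState incoming) {
    if (existing.getColumnValues() != PRow.DELETE_MARKER && incoming.getColumnValues() != PRow.DELETE_MARKER) {
        // column-by-column merge, keyed on PColumn
        existing.join(incoming);
        return existing;
    }
    // a DELETE_MARKER on either side means no merge; the incoming state replaces the existing one
    return incoming;
}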
Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.
The class MutationState, method validate.
private long validate(TableRef tableRef, Map<ImmutableBytesPtr, RowMutationState> rowKeyToColumnMap) throws SQLException {
    Long scn = connection.getSCN();
    MetaDataClient client = new MetaDataClient(connection);
    long serverTimeStamp = tableRef.getTimeStamp();
    // If we're auto committing, we've already validated the schema when we got the ColumnResolver,
    // so no need to do it again here.
    PTable table = tableRef.getTable();
    MetaDataMutationResult result = client.updateCache(table.getSchemaName().getString(), table.getTableName().getString());
    PTable resolvedTable = result.getTable();
    if (resolvedTable == null) {
        throw new TableNotFoundException(table.getSchemaName().getString(), table.getTableName().getString());
    }
    // Always update tableRef table as the one we've cached may be out of date since when we executed
    // the UPSERT VALUES call and updated in the cache before this.
    tableRef.setTable(resolvedTable);
    List<PTable> indexes = resolvedTable.getIndexes();
    for (PTable idxTtable : indexes) {
        // our failure mode is block writes on index failure.
        if (idxTtable.getIndexState() == PIndexState.ACTIVE && idxTtable.getIndexDisableTimestamp() > 0) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.INDEX_FAILURE_BLOCK_WRITE)
                .setSchemaName(table.getSchemaName().getString())
                .setTableName(table.getTableName().getString()).build().buildException();
        }
    }
    long timestamp = result.getMutationTime();
    if (timestamp != QueryConstants.UNSET_TIMESTAMP) {
        serverTimeStamp = timestamp;
        if (result.wasUpdated()) {
            List<PColumn> columns = Lists.newArrayListWithExpectedSize(table.getColumns().size());
            for (Map.Entry<ImmutableBytesPtr, RowMutationState> rowEntry : rowKeyToColumnMap.entrySet()) {
                RowMutationState valueEntry = rowEntry.getValue();
                if (valueEntry != null) {
                    Map<PColumn, byte[]> colValues = valueEntry.getColumnValues();
                    if (colValues != PRow.DELETE_MARKER) {
                        for (PColumn column : colValues.keySet()) {
                            if (!column.isDynamic())
                                columns.add(column);
                        }
                    }
                }
            }
            for (PColumn column : columns) {
                if (column != null) {
                    resolvedTable.getColumnFamily(column.getFamilyName().getString()).getPColumnForColumnName(column.getName().getString());
                }
            }
        }
    }
    return scn == null ? serverTimeStamp == QueryConstants.UNSET_TIMESTAMP ? HConstants.LATEST_TIMESTAMP : serverTimeStamp : scn;
}
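PColumn serves re-validation here: after the table metadata is refreshed, every non-dynamic column referenced by the pending mutations is looked up again on the resolved table, so a concurrently dropped column or family fails fast before the mutations are sent. A minimal sketch of that check as a hypothetical helper; the name and parameters are illustrative only:

// Hypothetical helper mirroring the re-validation loop above: a dropped family or
// column surfaces as a SQLException from the metadata lookups.
private void checkColumnsStillExist(PTable resolvedTable, List<PColumn> pendingColumns) throws SQLException {
    for (PColumn column : pendingColumns) {
        if (column != null && !column.isDynamic()) {
            resolvedTable.getColumnFamily(column.getFamilyName().getString())
                    .getPColumnForColumnName(column.getName().getString());
        }
    }
}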
Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.
The class QueryOptimizerTest, method testCharArrayLength.
@Test
public void testCharArrayLength() throws Exception {
    Connection conn = DriverManager.getConnection(getUrl());
    conn.createStatement().execute("CREATE TABLE TEST.TEST (testInt INTEGER, testCharArray CHAR(3)[], testByteArray BINARY(7)[], " + "CONSTRAINT test_pk PRIMARY KEY(testInt)) DEFAULT_COLUMN_FAMILY='T'");
    conn.createStatement().execute("CREATE INDEX TEST_INDEX ON TEST.TEST (testInt) INCLUDE (testCharArray, testByteArray)");
    PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
    QueryPlan plan = stmt.optimizeQuery("SELECT /*+ INDEX(TEST.TEST TEST_INDEX)*/ testCharArray,testByteArray FROM TEST.TEST");
    List<PColumn> columns = plan.getTableRef().getTable().getColumns();
    assertEquals(3, columns.size());
    assertEquals(3, columns.get(1).getMaxLength().intValue());
    assertEquals(7, columns.get(2).getMaxLength().intValue());
}
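The assertions depend on PColumn exposing declared type metadata such as getMaxLength(), which carries the length from the column definitions (CHAR(3) and BINARY(7)). A small illustrative helper for inspecting that metadata from a query plan's table; the helper itself is not part of the test:

// Hypothetical helper: print each PColumn's name and declared maximum length.
private static void printColumnLengths(QueryPlan plan) {
    for (PColumn column : plan.getTableRef().getTable().getColumns()) {
        // getMaxLength() returns null for types without a fixed declared length
        System.out.println(column.getName().getString() + " maxLength=" + column.getMaxLength());
    }
}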
Use of org.apache.phoenix.schema.PColumn in project phoenix by apache.
The class FormatToKeyValueReducer, method initColumnsMap.
private void initColumnsMap(PhoenixConnection conn) throws SQLException {
    Map<byte[], Integer> indexMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    columnIndexes = new HashMap<>();
    int columnIndex = 0;
    for (int index = 0; index < logicalNames.size(); index++) {
        PTable table = PhoenixRuntime.getTable(conn, logicalNames.get(index));
        if (!table.getImmutableStorageScheme().equals(ImmutableStorageScheme.ONE_CELL_PER_COLUMN)) {
            List<PColumnFamily> cfs = table.getColumnFamilies();
            for (int i = 0; i < cfs.size(); i++) {
                byte[] family = cfs.get(i).getName().getBytes();
                Pair<byte[], byte[]> pair = new Pair<>(family, QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES);
                columnIndexes.put(new Integer(columnIndex), pair);
                columnIndex++;
            }
        } else {
            List<PColumn> cls = table.getColumns();
            for (int i = 0; i < cls.size(); i++) {
                PColumn c = cls.get(i);
                byte[] family = new byte[0];
                byte[] cq;
                if (!SchemaUtil.isPKColumn(c)) {
                    family = c.getFamilyName().getBytes();
                    cq = c.getColumnQualifierBytes();
                } else {
                    cq = c.getName().getBytes();
                }
                byte[] cfn = Bytes.add(family, QueryConstants.NAMESPACE_SEPARATOR_BYTES, cq);
                Pair<byte[], byte[]> pair = new Pair<>(family, cq);
                if (!indexMap.containsKey(cfn)) {
                    indexMap.put(cfn, new Integer(columnIndex));
                    columnIndexes.put(new Integer(columnIndex), pair);
                    columnIndex++;
                }
            }
        }
        byte[] emptyColumnFamily = SchemaUtil.getEmptyColumnFamily(table);
        byte[] emptyKeyValue = EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst();
        Pair<byte[], byte[]> pair = new Pair<>(emptyColumnFamily, emptyKeyValue);
        columnIndexes.put(new Integer(columnIndex), pair);
        columnIndex++;
    }
}
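The reducer keys its lookup map on the (column family, column qualifier) pair that each PColumn describes, with primary-key columns getting an empty family and their name as the qualifier. A minimal sketch of that derivation as a hypothetical helper; the method name is illustrative only:

// Hypothetical helper mirroring the per-column branch above: derive the
// (family, qualifier) pair a PColumn maps to. PK columns have no column family.
private static Pair<byte[], byte[]> familyAndQualifier(PColumn c) {
    if (SchemaUtil.isPKColumn(c)) {
        return new Pair<>(new byte[0], c.getName().getBytes());
    }
    return new Pair<>(c.getFamilyName().getBytes(), c.getColumnQualifierBytes());
}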