Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in project phoenix by apache.
In the class MetaDataClient, the method dropColumn:
public MutationState dropColumn(DropColumnStatement statement) throws SQLException {
connection.rollback();
boolean wasAutoCommit = connection.getAutoCommit();
try {
connection.setAutoCommit(false);
PName tenantId = connection.getTenantId();
TableName tableNameNode = statement.getTable().getName();
String schemaName = tableNameNode.getSchemaName();
String tableName = tableNameNode.getTableName();
String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
boolean retried = false;
while (true) {
final ColumnResolver resolver = FromCompiler.getResolver(statement, connection);
TableRef tableRef = resolver.getTables().get(0);
PTable table = tableRef.getTable();
List<ColumnName> columnRefs = statement.getColumnRefs();
if (columnRefs == null) {
columnRefs = Lists.newArrayListWithCapacity(0);
}
List<ColumnRef> columnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size() + table.getIndexes().size());
List<TableRef> indexesToDrop = Lists.newArrayListWithExpectedSize(table.getIndexes().size());
List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize((table.getIndexes().size() + 1) * (1 + table.getColumns().size() - columnRefs.size()));
List<PColumn> tableColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
for (ColumnName column : columnRefs) {
ColumnRef columnRef = null;
try {
columnRef = resolver.resolveColumn(null, column.getFamilyName(), column.getColumnName());
} catch (ColumnNotFoundException e) {
if (statement.ifExists()) {
return new MutationState(0, 0, connection);
}
throw e;
}
PColumn columnToDrop = columnRef.getColumn();
tableColumnsToDrop.add(columnToDrop);
if (SchemaUtil.isPKColumn(columnToDrop)) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_PK).setColumnName(columnToDrop.getName().getString()).build().buildException();
} else if (table.isAppendOnlySchema()) {
throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_DROP_COL_APPEND_ONLY_SCHEMA).setColumnName(columnToDrop.getName().getString()).build().buildException();
}
columnsToDrop.add(new ColumnRef(columnRef.getTableRef(), columnToDrop.getPosition()));
}
dropColumnMutations(table, tableColumnsToDrop);
boolean removedIndexTableOrColumn = false;
Long timeStamp = table.isTransactional() ? tableRef.getTimeStamp() : null;
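// For each index on the table: if a dropped column is one of the indexed columns, the whole index
// has to go; if it is only a covered (included) column, just the corresponding index column is dropped.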
for (PTable index : table.getIndexes()) {
IndexMaintainer indexMaintainer = index.getIndexMaintainer(table, connection);
// get the covered columns
List<PColumn> indexColumnsToDrop = Lists.newArrayListWithExpectedSize(columnRefs.size());
Set<Pair<String, String>> indexedColsInfo = indexMaintainer.getIndexedColumnInfo();
Set<ColumnReference> coveredCols = indexMaintainer.getCoveredColumns();
for (PColumn columnToDrop : tableColumnsToDrop) {
Pair<String, String> columnToDropInfo = new Pair<>(columnToDrop.getFamilyName().getString(), columnToDrop.getName().getString());
ColumnReference colDropRef = new ColumnReference(columnToDrop.getFamilyName() == null ? null : columnToDrop.getFamilyName().getBytes(), columnToDrop.getColumnQualifierBytes());
boolean isColumnIndexed = indexedColsInfo.contains(columnToDropInfo);
if (isColumnIndexed) {
if (index.getViewIndexId() == null) {
indexesToDrop.add(new TableRef(index));
}
connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, index.getName().getString()), index.getParentName() == null ? null : index.getParentName().getString(), index.getTimeStamp());
removedIndexTableOrColumn = true;
} else if (coveredCols.contains(colDropRef)) {
String indexColumnName = IndexUtil.getIndexColumnName(columnToDrop);
PColumn indexColumn = index.getColumnForColumnName(indexColumnName);
indexColumnsToDrop.add(indexColumn);
// add the index column to be dropped so that we actually delete the column values
columnsToDrop.add(new ColumnRef(new TableRef(index), indexColumn.getPosition()));
removedIndexTableOrColumn = true;
}
}
if (!indexColumnsToDrop.isEmpty()) {
long indexTableSeqNum = incrementTableSeqNum(index, index.getType(), -indexColumnsToDrop.size(), null, null);
dropColumnMutations(index, indexColumnsToDrop);
long clientTimestamp = MutationState.getMutationTimestamp(timeStamp, connection.getSCN());
connection.removeColumn(tenantId, index.getName().getString(), indexColumnsToDrop, clientTimestamp, indexTableSeqNum, TransactionUtil.getResolvedTimestamp(connection, index.isTransactional(), clientTimestamp));
}
}
tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
connection.rollback();
long seqNum = incrementTableSeqNum(table, statement.getTableType(), -tableColumnsToDrop.size(), null, null);
tableMetaData.addAll(connection.getMutationState().toMutations(timeStamp).next().getSecond());
connection.rollback();
// Force table header to be first in list
Collections.reverse(tableMetaData);
/*
* Ensure our "empty column family to be" exists. Somewhat of an edge case, but can occur if we drop the last column
* in a column family that was the empty column family. In that case, we have to pick another one. If there are no other
* ones, then we need to create our default empty column family. Note that this may no longer be necessary once we
* support declaring what the empty column family is on a table, as:
* - If you declare it, we'd just ensure it's created at DDL time and never switch what it is unless you change it
* - If you don't declare it, we can just continue to use the old empty column family in this case, dynamically updating
* the empty column family name on the PTable.
*/
for (ColumnRef columnRefToDrop : columnsToDrop) {
PTable tableContainingColumnToDrop = columnRefToDrop.getTable();
byte[] emptyCF = getNewEmptyColumnFamilyOrNull(tableContainingColumnToDrop, columnRefToDrop.getColumn());
if (emptyCF != null) {
try {
tableContainingColumnToDrop.getColumnFamily(emptyCF);
} catch (ColumnFamilyNotFoundException e) {
// Only if it's not already a column family do we need to ensure it's created
Map<String, List<Pair<String, Object>>> family = new HashMap<>(1);
family.put(Bytes.toString(emptyCF), Collections.<Pair<String, Object>>emptyList());
// Just use a Put without any key values as the Mutation, as addColumn will treat this specially
// TODO: pass through schema name and table name instead to these methods as it's cleaner
byte[] tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
if (tenantIdBytes == null)
tenantIdBytes = ByteUtil.EMPTY_BYTE_ARRAY;
connection.getQueryServices().addColumn(Collections.<Mutation>singletonList(new Put(SchemaUtil.getTableKey(tenantIdBytes, tableContainingColumnToDrop.getSchemaName().getBytes(), tableContainingColumnToDrop.getTableName().getBytes()))), tableContainingColumnToDrop, family, Sets.newHashSet(Bytes.toString(emptyCF)), Collections.<PColumn>emptyList());
}
}
}
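// Send the accumulated metadata mutations to the server to perform the actual column drop.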
MetaDataMutationResult result = connection.getQueryServices().dropColumn(tableMetaData, statement.getTableType());
try {
MutationCode code = processMutationResult(schemaName, tableName, result);
if (code == MutationCode.COLUMN_NOT_FOUND) {
addTableToCache(result);
if (!statement.ifExists()) {
throw new ColumnNotFoundException(schemaName, tableName, Bytes.toString(result.getFamilyName()), Bytes.toString(result.getColumnName()));
}
return new MutationState(0, 0, connection);
}
// Update the client-side metadata cache to reflect the drop: if an index table or index column was
// also removed, invalidate the cached table entirely; otherwise just remove the dropped columns from it.
if (tableColumnsToDrop.size() > 0) {
if (removedIndexTableOrColumn)
connection.removeTable(tenantId, tableName, table.getParentName() == null ? null : table.getParentName().getString(), table.getTimeStamp());
else
connection.removeColumn(tenantId, SchemaUtil.getTableName(schemaName, tableName), tableColumnsToDrop, result.getMutationTime(), seqNum, TransactionUtil.getResolvedTime(connection, result));
}
// If we have a VIEW, then only delete the metadata, and leave the table data alone
if (table.getType() != PTableType.VIEW) {
MutationState state = null;
connection.setAutoCommit(true);
Long scn = connection.getSCN();
// Delete everything in the column. You'll still be able to do queries at earlier timestamps
long ts = (scn == null ? result.getMutationTime() : scn);
PostDDLCompiler compiler = new PostDDLCompiler(connection);
boolean dropMetaData = connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
// If the index is a local index or a view index it uses a shared physical table,
// so we need to issue delete markers for all the rows of the index
final List<TableRef> tableRefsToDrop = Lists.newArrayList();
Map<String, List<TableRef>> tenantIdTableRefMap = Maps.newHashMap();
if (result.getSharedTablesToDelete() != null) {
for (SharedTableState sharedTableState : result.getSharedTablesToDelete()) {
PTableImpl viewIndexTable = new PTableImpl(sharedTableState.getTenantId(), sharedTableState.getSchemaName(), sharedTableState.getTableName(), ts, table.getColumnFamilies(), sharedTableState.getColumns(), sharedTableState.getPhysicalNames(), sharedTableState.getViewIndexId(), table.isMultiTenant(), table.isNamespaceMapped(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.getEncodedCQCounter(), table.useStatsForParallelization());
TableRef indexTableRef = new TableRef(viewIndexTable);
PName indexTableTenantId = sharedTableState.getTenantId();
if (indexTableTenantId == null) {
tableRefsToDrop.add(indexTableRef);
} else {
if (!tenantIdTableRefMap.containsKey(indexTableTenantId)) {
tenantIdTableRefMap.put(indexTableTenantId.getString(), Lists.<TableRef>newArrayList());
}
tenantIdTableRefMap.get(indexTableTenantId.getString()).add(indexTableRef);
}
}
}
// If metadata is not dropped on the server side, delete the index rows here as well
// (otherwise they would have been dropped in ConnectionQueryServices.dropColumn)
if (!dropMetaData) {
tableRefsToDrop.addAll(indexesToDrop);
}
// Drop any index tables that had the dropped column in the PK
state = connection.getQueryServices().updateData(compiler.compile(tableRefsToDrop, null, null, Collections.<PColumn>emptyList(), ts));
// Drop any tenant-specific indexes
if (!tenantIdTableRefMap.isEmpty()) {
for (Entry<String, List<TableRef>> entry : tenantIdTableRefMap.entrySet()) {
String indexTenantId = entry.getKey();
Properties props = new Properties(connection.getClientInfo());
props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, indexTenantId);
try (PhoenixConnection tenantConn = new PhoenixConnection(connection, connection.getQueryServices(), props)) {
PostDDLCompiler dropCompiler = new PostDDLCompiler(tenantConn);
state = tenantConn.getQueryServices().updateData(dropCompiler.compile(entry.getValue(), null, null, Collections.<PColumn>emptyList(), ts));
}
}
}
// See https://issues.apache.org/jira/browse/PHOENIX-3605
if (!table.isImmutableRows() || table.getImmutableStorageScheme() == ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
// Update empty key value column if necessary
for (ColumnRef droppedColumnRef : columnsToDrop) {
// Painful, but we need a TableRef with a pre-set timestamp to prevent attempts
// to get any updates from the region server.
// TODO: move this into PostDDLCompiler
// TODO: consider filtering mutable indexes here, but then the issue is that
// we'd need to force an update of the data row empty key value if a mutable
// secondary index is changing its empty key value family.
droppedColumnRef = droppedColumnRef.cloneAtTimestamp(ts);
TableRef droppedColumnTableRef = droppedColumnRef.getTableRef();
PColumn droppedColumn = droppedColumnRef.getColumn();
MutationPlan plan = compiler.compile(Collections.singletonList(droppedColumnTableRef), getNewEmptyColumnFamilyOrNull(droppedColumnTableRef.getTable(), droppedColumn), null, Collections.singletonList(droppedColumn), ts);
state = connection.getQueryServices().updateData(plan);
}
}
// Return the last MutationState
return state;
}
return new MutationState(0, 0, connection);
} catch (ConcurrentTableMutationException e) {
if (retried) {
throw e;
}
table = connection.getTable(new PTableKey(tenantId, fullTableName));
retried = true;
}
}
} finally {
connection.setAutoCommit(wasAutoCommit);
}
}
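For orientation, a minimal, hypothetical JDBC sketch of the DDL that drives this path (the URL, table, index, and column names are illustrative, not taken from the Phoenix sources): dropping a column that is merely covered by an index removes the corresponding index column, while dropping an indexed column drops the index itself.
try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
Statement stmt = conn.createStatement();
stmt.execute("CREATE TABLE T (K VARCHAR PRIMARY KEY, V1 VARCHAR, V2 VARCHAR)");
stmt.execute("CREATE INDEX IDX ON T (V1) INCLUDE (V2)");
// V2 is only a covered column of IDX, so this drop removes the index column rather than the index.
stmt.execute("ALTER TABLE T DROP COLUMN V2");
// V1 is an indexed column of IDX, so this drop causes IDX itself to be dropped.
stmt.execute("ALTER TABLE T DROP COLUMN V1");
}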
Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in project phoenix by apache.
In the class IndexMaintainerTest, the method tesIndexedExpressionSerialization:
@Test
public void tesIndexedExpressionSerialization() throws Exception {
Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
Connection conn = DriverManager.getConnection(getUrl(), props);
try {
conn.setAutoCommit(true);
conn.createStatement().execute("CREATE TABLE IF NOT EXISTS FHA (ORGANIZATION_ID CHAR(15) NOT NULL, PARENT_ID CHAR(15) NOT NULL, CREATED_DATE DATE NOT NULL, ENTITY_HISTORY_ID CHAR(15) NOT NULL, FIELD_HISTORY_ARCHIVE_ID CHAR(15), CREATED_BY_ID VARCHAR, FIELD VARCHAR, DATA_TYPE VARCHAR, OLDVAL_STRING VARCHAR, NEWVAL_STRING VARCHAR, OLDVAL_FIRST_NAME VARCHAR, NEWVAL_FIRST_NAME VARCHAR, OLDVAL_LAST_NAME VARCHAR, NEWVAL_LAST_NAME VARCHAR, OLDVAL_NUMBER DECIMAL, NEWVAL_NUMBER DECIMAL, OLDVAL_DATE DATE, NEWVAL_DATE DATE, ARCHIVE_PARENT_TYPE VARCHAR, ARCHIVE_FIELD_NAME VARCHAR, ARCHIVE_TIMESTAMP DATE, ARCHIVE_PARENT_NAME VARCHAR, DIVISION INTEGER, CONNECTION_ID VARCHAR CONSTRAINT PK PRIMARY KEY (ORGANIZATION_ID, PARENT_ID, CREATED_DATE DESC, ENTITY_HISTORY_ID )) VERSIONS=1,MULTI_TENANT=true");
conn.createStatement().execute("CREATE INDEX IDX ON FHA (FIELD_HISTORY_ARCHIVE_ID, UPPER(OLDVAL_STRING) || UPPER(NEWVAL_STRING), NEWVAL_DATE - NEWVAL_DATE)");
PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), "FHA"));
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
table.getIndexMaintainers(ptr, pconn);
List<IndexMaintainer> indexMaintainerList = IndexMaintainer.deserialize(ptr, GenericKeyValueBuilder.INSTANCE, true);
assertEquals(1, indexMaintainerList.size());
IndexMaintainer indexMaintainer = indexMaintainerList.get(0);
Set<ColumnReference> indexedColumns = indexMaintainer.getIndexedColumns();
assertEquals("Unexpected number of indexed columns", 4, indexedColumns.size());
} finally {
conn.close();
}
}
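As a small follow-on sketch (not part of the Phoenix test), the indexed columns returned above are ColumnReference instances whose family and qualifier bytes can be inspected directly; this assumes the indexedColumns set obtained in the test:
for (ColumnReference ref : indexedColumns) {
// ColumnReference exposes the raw column family and qualifier bytes of the referenced column.
System.out.println(Bytes.toString(ref.getFamily()) + ":" + Bytes.toString(ref.getQualifier()));
}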
Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in project phoenix by apache.
In the class IndexMaintainerTest, the method testIndexRowKeyBuilding:
private void testIndexRowKeyBuilding(String schemaName, String tableName, String dataColumns, String pk, String indexColumns, Object[] values, String includeColumns, String dataProps, String indexProps, KeyValueBuilder builder) throws Exception {
Connection conn = DriverManager.getConnection(getUrl());
String fullTableName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(schemaName), SchemaUtil.normalizeIdentifier(tableName));
String fullIndexName = SchemaUtil.getTableName(SchemaUtil.normalizeIdentifier(schemaName), SchemaUtil.normalizeIdentifier("idx"));
conn.createStatement().execute("CREATE TABLE " + fullTableName + "(" + dataColumns + " CONSTRAINT pk PRIMARY KEY (" + pk + ")) " + (dataProps.isEmpty() ? "" : dataProps));
try {
conn.createStatement().execute("CREATE INDEX idx ON " + fullTableName + "(" + indexColumns + ") " + (includeColumns.isEmpty() ? "" : "INCLUDE (" + includeColumns + ") ") + (indexProps.isEmpty() ? "" : indexProps));
PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), fullTableName));
PTable index = pconn.getTable(new PTableKey(pconn.getTenantId(), fullIndexName));
ImmutableBytesWritable ptr = new ImmutableBytesWritable();
table.getIndexMaintainers(ptr, pconn);
List<IndexMaintainer> c1 = IndexMaintainer.deserialize(ptr, builder, true);
assertEquals(1, c1.size());
IndexMaintainer im1 = c1.get(0);
StringBuilder buf = new StringBuilder("UPSERT INTO " + fullTableName + " VALUES(");
for (int i = 0; i < values.length; i++) {
buf.append("?,");
}
buf.setCharAt(buf.length() - 1, ')');
PreparedStatement stmt = conn.prepareStatement(buf.toString());
for (int i = 0; i < values.length; i++) {
stmt.setObject(i + 1, values[i]);
}
stmt.execute();
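// Collect the uncommitted data mutations and build a ColumnReference -> value map along with the data row Put.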
Iterator<Pair<byte[], List<KeyValue>>> iterator = PhoenixRuntime.getUncommittedDataIterator(conn);
List<KeyValue> dataKeyValues = iterator.next().getSecond();
Map<ColumnReference, byte[]> valueMap = Maps.newHashMapWithExpectedSize(dataKeyValues.size());
byte[] row = dataKeyValues.get(0).getRow();
ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable(row);
Put dataMutation = new Put(rowKeyPtr.copyBytes());
for (KeyValue kv : dataKeyValues) {
valueMap.put(new ColumnReference(kv.getFamily(), kv.getQualifier()), kv.getValue());
dataMutation.add(kv);
}
ValueGetter valueGetter = newValueGetter(row, valueMap);
List<Mutation> indexMutations = IndexTestUtil.generateIndexData(index, table, dataMutation, ptr, builder);
assertEquals(1, indexMutations.size());
assertTrue(indexMutations.get(0) instanceof Put);
Mutation indexMutation = indexMutations.get(0);
ImmutableBytesWritable indexKeyPtr = new ImmutableBytesWritable(indexMutation.getRow());
ptr.set(rowKeyPtr.get(), rowKeyPtr.getOffset(), rowKeyPtr.getLength());
byte[] mutableIndexRowKey = im1.buildRowKey(valueGetter, ptr, null, null);
byte[] immutableIndexRowKey = indexKeyPtr.copyBytes();
assertArrayEquals(immutableIndexRowKey, mutableIndexRowKey);
for (ColumnReference ref : im1.getCoveredColumns()) {
valueMap.get(ref);
}
byte[] dataRowKey = im1.buildDataRowKey(indexKeyPtr, null);
assertArrayEquals(dataRowKey, dataKeyValues.get(0).getRow());
} finally {
try {
conn.createStatement().execute("DROP TABLE " + fullTableName);
} finally {
conn.close();
}
}
}
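A hypothetical caller of this helper might look like the following; the schema, column definitions, and values are illustrative only, not taken from the Phoenix test fixtures:
@Test
public void testSampleVarcharIndexRowKey() throws Exception {
// Hypothetical invocation: a VARCHAR primary key, one indexed column, one included column, no table/index properties.
testIndexRowKeyBuilding("", "T", "k VARCHAR, v1 VARCHAR, v2 VARCHAR", "k", "v1", new Object[] { "a", "x", "y" }, "v2", "", "", GenericKeyValueBuilder.INSTANCE);
}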
Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in project phoenix by apache.
In the class GroupedAggregateRegionObserver, the method doPostScannerOpen:
/**
 * Replaces the RegionScanner s with a RegionScanner that groups by the key formed by the list
 * of expressions from the scan and returns the aggregated rows of each group. For example,
 * given the following original rows in the RegionScanner:
 *
 *   KEY   COL1
 *   row1  a
 *   row2  b
 *   row3  a
 *   row4  a
 *
 * the following rows will be returned for COUNT(*):
 *
 *   KEY  COUNT
 *   a    3
 *   b    1
 *
 * The client is required to do a sort and a final aggregation, since multiple rows with the
 * same key may be returned from different regions.
 */
@Override
protected RegionScanner doPostScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan, RegionScanner s) throws IOException {
boolean keyOrdered = false;
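// The client serializes the GROUP BY expressions into one of two scan attributes: unordered
// (rows must be grouped in memory here) or key-ordered (rows already arrive in group-by key order).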
byte[] expressionBytes = scan.getAttribute(BaseScannerRegionObserver.UNORDERED_GROUP_BY_EXPRESSIONS);
if (expressionBytes == null) {
expressionBytes = scan.getAttribute(BaseScannerRegionObserver.KEY_ORDERED_GROUP_BY_EXPRESSIONS);
keyOrdered = true;
}
int offset = 0;
if (ScanUtil.isLocalIndex(scan)) {
/*
* For local indexes, we need to set an offset on row key expressions to skip
* the region start key.
*/
Region region = c.getEnvironment().getRegion();
offset = region.getRegionInfo().getStartKey().length != 0 ? region.getRegionInfo().getStartKey().length : region.getRegionInfo().getEndKey().length;
ScanUtil.setRowKeyOffset(scan, offset);
}
List<Expression> expressions = deserializeGroupByExpressions(expressionBytes, 0);
ServerAggregators aggregators = ServerAggregators.deserialize(scan.getAttribute(BaseScannerRegionObserver.AGGREGATORS), c.getEnvironment().getConfiguration());
RegionScanner innerScanner = s;
boolean useProto = false;
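// Prefer the protobuf-serialized local index build attribute; fall back to the non-proto attribute if it is absent.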
byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD_PROTO);
useProto = localIndexBytes != null;
if (localIndexBytes == null) {
localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
}
List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes, useProto);
TupleProjector tupleProjector = null;
byte[][] viewConstants = null;
ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan));
if (ScanUtil.isLocalIndex(scan) || (j == null && p != null)) {
if (dataColumns != null) {
tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
}
ImmutableBytesPtr tempPtr = new ImmutableBytesPtr();
innerScanner = getWrappedScanner(c, innerScanner, offset, scan, dataColumns, tupleProjector, c.getEnvironment().getRegion(), indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, p, tempPtr, useQualifierAsIndex);
}
if (j != null) {
innerScanner = new HashJoinRegionScanner(innerScanner, p, j, ScanUtil.getTenantId(scan), c.getEnvironment(), useQualifierAsIndex, useNewValueColumnQualifier);
}
long limit = Long.MAX_VALUE;
byte[] limitBytes = scan.getAttribute(GROUP_BY_LIMIT);
if (limitBytes != null) {
limit = PInteger.INSTANCE.getCodec().decodeInt(limitBytes, 0, SortOrder.getDefault());
}
if (keyOrdered) {
// already in the required group by key order
return scanOrdered(c, scan, innerScanner, expressions, aggregators, limit);
} else {
// Otherwise, collect them all up in an in-memory map
return scanUnordered(c, scan, innerScanner, expressions, aggregators, limit);
}
}
Use of org.apache.phoenix.hbase.index.covered.update.ColumnReference in project phoenix by apache.
In the class TestColumnTracker, the method testHasNewerTimestamps:
@Test
public void testHasNewerTimestamps() throws Exception {
Collection<ColumnReference> columns = new ArrayList<ColumnReference>();
ColumnTracker tracker = new ColumnTracker(columns);
assertFalse("Tracker has newer timestamps when no ts set", tracker.hasNewerTimestamps());
tracker.setTs(10);
assertTrue("Tracker doesn't have newer timestamps with set ts", tracker.hasNewerTimestamps());
}
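As a small hedged extension (not part of the original test), the tracker can also be seeded with explicit ColumnReference instances; the timestamp behavior is the same:
Collection<ColumnReference> tracked = new ArrayList<ColumnReference>();
tracked.add(new ColumnReference(Bytes.toBytes("fam"), Bytes.toBytes("qual")));
ColumnTracker trackedTracker = new ColumnTracker(tracked);
// Until a timestamp is set, the tracker reports no newer timestamps.
assertFalse(trackedTracker.hasNewerTimestamps());
trackedTracker.setTs(42);
assertTrue(trackedTracker.hasNewerTimestamps());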