Use of org.apache.phoenix.schema.TableRef in project Phoenix by Apache.
The class MutationState, method commit.
public void commit() throws SQLException {
    Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> txMutations = Collections.emptyMap();
    int retryCount = 0;
    do {
        boolean sendSuccessful = false;
        boolean retryCommit = false;
        SQLException sqlE = null;
        try {
            send();
            txMutations = this.txMutations;
            sendSuccessful = true;
        } catch (SQLException e) {
            sqlE = e;
        } finally {
            try {
                if (txContext != null && isTransactionStarted()) {
                    TransactionFailureException txFailure = null;
                    boolean finishSuccessful = false;
                    try {
                        if (sendSuccessful) {
                            txContext.finish();
                            finishSuccessful = true;
                        }
                    } catch (TransactionFailureException e) {
                        if (logger.isInfoEnabled())
                            logger.info(e.getClass().getName() + " at timestamp " + getInitialWritePointer() + " with retry count of " + retryCount);
                        retryCommit = (e instanceof TransactionConflictException && retryCount < MAX_COMMIT_RETRIES);
                        txFailure = e;
                        SQLException nextE = TransactionUtil.getTransactionFailureException(e);
                        if (sqlE == null) {
                            sqlE = nextE;
                        } else {
                            sqlE.setNextException(nextE);
                        }
                    } finally {
                        // If send fails or finish fails, abort the tx
                        if (!finishSuccessful) {
                            try {
                                txContext.abort(txFailure);
                                if (logger.isInfoEnabled())
                                    logger.info("Abort successful");
                            } catch (TransactionFailureException e) {
                                if (logger.isInfoEnabled())
                                    logger.info("Abort failed with " + e);
                                SQLException nextE = TransactionUtil.getTransactionFailureException(e);
                                if (sqlE == null) {
                                    sqlE = nextE;
                                } else {
                                    sqlE.setNextException(nextE);
                                }
                            }
                        }
                    }
                }
            } finally {
                try {
                    resetState();
                } finally {
                    if (retryCommit) {
                        startTransaction();
                        // Add back read fences
                        Set<TableRef> txTableRefs = txMutations.keySet();
                        for (TableRef tableRef : txTableRefs) {
                            PTable dataTable = tableRef.getTable();
                            addDMLFence(dataTable);
                        }
                        try {
                            // Only retry if an index was added
                            retryCommit = shouldResubmitTransaction(txTableRefs);
                        } catch (SQLException e) {
                            retryCommit = false;
                            if (sqlE == null) {
                                sqlE = e;
                            } else {
                                sqlE.setNextException(e);
                            }
                        }
                    }
                    if (sqlE != null && !retryCommit) {
                        throw sqlE;
                    }
                }
            }
        }
        // Retry commit once if conflict occurred and index was added
        if (!retryCommit) {
            break;
        }
        retryCount++;
        mutations.putAll(txMutations);
    } while (true);
}
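For context, this is the path a plain JDBC client exercises: with auto-commit off, upserts are buffered in MutationState and only sent when the connection commits. A minimal caller-side sketch, assuming a local Phoenix cluster at localhost and a hypothetical table MY_TABLE:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class CommitExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical connection URL and table; adjust for a real deployment.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            conn.setAutoCommit(false); // buffer mutations client-side in MutationState
            try (PreparedStatement ps =
                    conn.prepareStatement("UPSERT INTO MY_TABLE (ID, VAL) VALUES (?, ?)")) {
                ps.setInt(1, 1);
                ps.setString(2, "v1");
                ps.executeUpdate(); // queued locally, not yet sent to the server
            }
            conn.commit(); // funnels into MutationState.commit() shown above
        }
    }
}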
Use of org.apache.phoenix.schema.TableRef in project Phoenix by Apache.
The class ScanPlan, method newIterator.
@Override
protected ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    // Set any scan attributes before creating the scanner, as it will be too late afterwards
    scan.setAttribute(BaseScannerRegionObserver.NON_AGGREGATE_QUERY, QueryConstants.TRUE);
    ResultIterator scanner;
    TableRef tableRef = this.getTableRef();
    PTable table = tableRef.getTable();
    boolean isSalted = table.getBucketNum() != null;
    /* If no limit or topN, use a parallel iterator so that we get results faster.
     * Otherwise, if a limit is provided, run the query serially.
     */
    boolean isOrdered = !orderBy.getOrderByExpressions().isEmpty();
    Integer perScanLimit = !allowPageFilter || isOrdered ? null : QueryUtil.getOffsetLimit(limit, offset);
    boolean isOffsetOnServer = isOffsetPossibleOnServer(context, orderBy, offset, isSalted, table.getIndexType());
    /*
     * For queries that order by the row key and cannot scan more than a threshold
     * worth of data, we only need to initialize scanners corresponding to the
     * first (or last, if reverse) scan per region.
     */
    boolean initFirstScanOnly = (orderBy == OrderBy.FWD_ROW_KEY_ORDER_BY || orderBy == OrderBy.REV_ROW_KEY_ORDER_BY) && isDataToScanWithinThreshold;
    BaseResultIterators iterators;
    if (isOffsetOnServer) {
        iterators = new SerialIterators(this, perScanLimit, offset, parallelIteratorFactory, scanGrouper, scan);
    } else if (isSerial) {
        iterators = new SerialIterators(this, perScanLimit, null, parallelIteratorFactory, scanGrouper, scan);
    } else {
        iterators = new ParallelIterators(this, perScanLimit, parallelIteratorFactory, scanGrouper, scan, initFirstScanOnly);
    }
    estimatedRows = iterators.getEstimatedRowCount();
    estimatedSize = iterators.getEstimatedByteCount();
    splits = iterators.getSplits();
    scans = iterators.getScans();
    if (isOffsetOnServer) {
        scanner = new ConcatResultIterator(iterators);
        if (limit != null) {
            scanner = new LimitingResultIterator(scanner, limit);
        }
    } else if (isOrdered) {
        scanner = new MergeSortTopNResultIterator(iterators, limit, offset, orderBy.getOrderByExpressions());
    } else {
        if ((isSalted || table.getIndexType() == IndexType.LOCAL) && ScanUtil.shouldRowsBeInRowKeyOrder(orderBy, context)) {
            /*
             * For salted tables or local indexes, a merge sort is needed if:
             * 1) the config phoenix.query.force.rowkeyorder is set to true, or
             * 2) the query has an order by that sorts the results by the
             *    row key (forward or reverse ordering).
             */
            scanner = new MergeSortRowKeyResultIterator(iterators, isSalted ? SaltingUtil.NUM_SALTING_BYTES : 0, orderBy == OrderBy.REV_ROW_KEY_ORDER_BY);
        } else if (useRoundRobinIterator()) {
            /*
             * For any kind of table, round robin is possible if no
             * ordering of rows is needed.
             */
            scanner = new RoundRobinResultIterator(iterators, this);
        } else {
            scanner = new ConcatResultIterator(iterators);
        }
        if (offset != null) {
            scanner = new OffsetResultIterator(scanner, offset);
        }
        if (limit != null) {
            scanner = new LimitingResultIterator(scanner, limit);
        }
    }
    if (context.getSequenceManager().getSequenceCount() > 0) {
        scanner = new SequenceResultIterator(scanner, context.getSequenceManager());
    }
    return scanner;
}
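Note the wrapping order in the final branch: offset is applied before limit, and the sequence iterator wraps last, so the limit counts post-offset rows. A self-contained sketch of that decorator ordering using hypothetical stand-in types (not the Phoenix iterator classes):

import java.util.Iterator;
import java.util.List;

final class IteratorPipeline {
    // OffsetResultIterator analogue: skip the first `offset` rows client-side.
    static Iterator<String> withOffset(Iterator<String> in, int offset) {
        for (int i = 0; i < offset && in.hasNext(); i++) in.next();
        return in;
    }

    // LimitingResultIterator analogue: cap the number of rows returned.
    static Iterator<String> withLimit(Iterator<String> in, int limit) {
        return new Iterator<String>() {
            int remaining = limit;
            public boolean hasNext() { return remaining > 0 && in.hasNext(); }
            public String next() { remaining--; return in.next(); }
        };
    }

    public static void main(String[] args) {
        Iterator<String> scanner = List.of("r1", "r2", "r3", "r4", "r5").iterator();
        scanner = withOffset(scanner, 1); // offset first...
        scanner = withLimit(scanner, 2);  // ...then limit, as in newIterator()
        scanner.forEachRemaining(System.out::println); // prints r2, r3
    }
}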
Use of org.apache.phoenix.schema.TableRef in project Phoenix by Apache.
The class MutationState, method joinMutationState.
private void joinMutationState(Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> srcMutations, Map<TableRef, Map<ImmutableBytesPtr, RowMutationState>> dstMutations) {
    // Merge srcMutations into dstMutations, keeping the source's state for any overlaps
    for (Map.Entry<TableRef, Map<ImmutableBytesPtr, RowMutationState>> entry : srcMutations.entrySet()) {
        // Replace existing entries for the table with new entries
        TableRef tableRef = entry.getKey();
        Map<ImmutableBytesPtr, RowMutationState> srcRows = entry.getValue();
        joinMutationState(tableRef, srcRows, dstMutations);
    }
}
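The per-table overload it delegates to lets the source rows win on overlapping row keys. The overall shape of the merge can be sketched with plain Java maps (generic stand-ins, not the Phoenix types):

import java.util.Map;

final class MutationMerge {
    // Merge src into dst; on a key collision within a table's row map, the src
    // row wins, mirroring "keeping the source's state for any overlaps" above.
    static <T, R, S> void join(Map<T, Map<R, S>> src, Map<T, Map<R, S>> dst) {
        for (Map.Entry<T, Map<R, S>> e : src.entrySet()) {
            dst.merge(e.getKey(), e.getValue(), (oldRows, newRows) -> {
                oldRows.putAll(newRows); // newer state overwrites existing rows
                return oldRows;
            });
        }
    }
}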
Use of org.apache.phoenix.schema.TableRef in project Phoenix by Apache.
The class WhereCompilerTest, method testTenantConstraintsAddedToScanWithNullTenantTypeId.
@Test
public void testTenantConstraintsAddedToScanWithNullTenantTypeId() throws SQLException {
    String tenantId = "000000000000123";
    createTestTable(getUrl(), "create table base_table_for_tenant_filter_test (tenant_id char(15) not null, " + "id char(5) not null, a_integer integer, a_string varchar(100) constraint pk primary key (tenant_id, id)) multi_tenant=true");
    createTestTable(getUrl(tenantId), "create view tenant_filter_test (tenant_col integer) AS SELECT * FROM BASE_TABLE_FOR_TENANT_FILTER_TEST");
    String query = "select * from tenant_filter_test where a_integer=0 and a_string='foo'";
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(tenantId), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
    QueryPlan plan = pstmt.optimizeQuery();
    Scan scan = plan.getContext().getScan();
    Filter filter = scan.getFilter();
    PTable table = plan.getTableRef().getTable();
    Expression aInteger = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_INTEGER").getPosition()).newColumnExpression();
    Expression aString = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_STRING").getPosition()).newColumnExpression();
    assertEquals(multiEncodedKVFilter(and(constantComparison(CompareOp.EQUAL, aInteger, 0), constantComparison(CompareOp.EQUAL, aString, "foo")), TWO_BYTE_QUALIFIERS), filter);
    byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId);
    assertArrayEquals(startRow, scan.getStartRow());
    byte[] stopRow = startRow;
    assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow());
}
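The final assertion expects the scan's stop row to be the tenant ID incremented by one, i.e. the smallest key strictly greater than every key carrying that prefix. An illustrative increment-with-carry re-implementation of what ByteUtil.nextKey computes (a sketch for illustration, not the Phoenix source):

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

final class NextKey {
    // Increment a byte array as an unsigned big-endian integer, with carry.
    static byte[] nextKey(byte[] key) {
        byte[] next = Arrays.copyOf(key, key.length);
        for (int i = next.length - 1; i >= 0; i--) {
            if (++next[i] != 0) {
                return next; // no rollover at this byte; done
            }
            // byte was 0xFF and rolled over to 0x00; carry leftwards
        }
        throw new IllegalArgumentException("key is all 0xFF; no next key of same length");
    }

    public static void main(String[] args) {
        byte[] start = "000000000000123".getBytes(StandardCharsets.US_ASCII);
        // Prints 000000000000124: same bytes, final '3' incremented to '4'.
        System.out.println(new String(nextKey(start), StandardCharsets.US_ASCII));
    }
}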
Use of org.apache.phoenix.schema.TableRef in project Phoenix by Apache.
The class MetaDataEndpointImpl, method dropColumnsFromChildViews.
private MetaDataMutationResult dropColumnsFromChildViews(Region region, PTable basePhysicalTable, List<RowLock> locks, List<Mutation> tableMetadata, List<Mutation> mutationsForAddingColumnsToViews, byte[] schemaName, byte[] tableName, List<ImmutableBytesPtr> invalidateList, long clientTimeStamp, TableViewFinder childViewsResult, List<byte[]> tableNamesToDelete, List<SharedTableState> sharedTablesToDelete) throws IOException, SQLException {
    List<Delete> columnDeletesForBaseTable = new ArrayList<>(tableMetadata.size());
    // Collect the Delete mutations for the columns being dropped from the base table.
    for (Mutation m : tableMetadata) {
        if (m instanceof Delete) {
            byte[][] rkmd = new byte[5][];
            int pkCount = getVarChars(m.getRow(), rkmd);
            if (pkCount > COLUMN_NAME_INDEX && Bytes.compareTo(schemaName, rkmd[SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
                columnDeletesForBaseTable.add((Delete) m);
            }
        }
    }
    for (ViewInfo viewInfo : childViewsResult.getViewInfoList()) {
        short numColsDeleted = 0;
        byte[] viewTenantId = viewInfo.getTenantId();
        byte[] viewSchemaName = viewInfo.getSchemaName();
        byte[] viewName = viewInfo.getViewName();
        byte[] viewKey = SchemaUtil.getTableKey(viewTenantId, viewSchemaName, viewName);
        // Lock the rows corresponding to views so that no other thread can modify the view
        // metadata.
        RowLock viewRowLock = acquireLock(region, viewKey, locks);
        PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock);
        ColumnOrdinalPositionUpdateList ordinalPositionList = new ColumnOrdinalPositionUpdateList();
        int numCols = view.getColumns().size();
        int minDroppedColOrdinalPos = Integer.MAX_VALUE;
        for (Delete columnDeleteForBaseTable : columnDeletesForBaseTable) {
            PColumn existingViewColumn = null;
            byte[][] rkmd = new byte[5][];
            getVarChars(columnDeleteForBaseTable.getRow(), rkmd);
            String columnName = Bytes.toString(rkmd[COLUMN_NAME_INDEX]);
            String columnFamily = rkmd[FAMILY_NAME_INDEX] == null ? null : Bytes.toString(rkmd[FAMILY_NAME_INDEX]);
            byte[] columnKey = getColumnKey(viewKey, columnName, columnFamily);
            try {
                existingViewColumn = columnFamily == null ? view.getColumnForColumnName(columnName) : view.getColumnFamily(columnFamily).getPColumnForColumnName(columnName);
            } catch (ColumnFamilyNotFoundException e) {
                // ignore since it means that the column family is not present in the view
            } catch (ColumnNotFoundException e) {
                // ignore since it means the column is not present in the view
            }
            // If the column being dropped is referenced in the view's WHERE clause, we cannot drop
            // it.
            if (existingViewColumn != null && view.getViewStatement() != null) {
                ParseNode viewWhere = new SQLParser(view.getViewStatement()).parseQuery().getWhere();
                PhoenixConnection conn = null;
                try {
                    conn = QueryUtil.getConnectionOnServer(env.getConfiguration()).unwrap(PhoenixConnection.class);
                } catch (ClassNotFoundException e) {
                }
                PhoenixStatement statement = new PhoenixStatement(conn);
                TableRef baseTableRef = new TableRef(basePhysicalTable);
                ColumnResolver columnResolver = FromCompiler.getResolver(baseTableRef);
                StatementContext context = new StatementContext(statement, columnResolver);
                Expression whereExpression = WhereCompiler.compile(context, viewWhere);
                Expression colExpression = new ColumnRef(baseTableRef, existingViewColumn.getPosition()).newColumnExpression();
                ColumnFinder columnFinder = new ColumnFinder(colExpression);
                whereExpression.accept(columnFinder);
                if (columnFinder.getColumnFound()) {
                    return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION, EnvironmentEdgeManager.currentTimeMillis(), basePhysicalTable);
                }
            }
            minDroppedColOrdinalPos = Math.min(getOrdinalPosition(view, existingViewColumn), minDroppedColOrdinalPos);
            if (existingViewColumn != null) {
                --numColsDeleted;
                if (ordinalPositionList.size() == 0) {
                    ordinalPositionList.setOffset(view.getBucketNum() == null ? 1 : 0);
                    for (PColumn col : view.getColumns()) {
                        ordinalPositionList.addColumn(getColumnKey(viewKey, col));
                    }
                }
                ordinalPositionList.dropColumn(columnKey);
                Delete viewColumnDelete = new Delete(columnKey, clientTimeStamp);
                mutationsForAddingColumnsToViews.add(viewColumnDelete);
                // drop any view indexes that need this column
                dropIndexes(view, region, invalidateList, locks, clientTimeStamp, schemaName, view.getName().getBytes(), mutationsForAddingColumnsToViews, existingViewColumn, tableNamesToDelete, sharedTablesToDelete);
            }
        }
        updateViewHeaderRow(basePhysicalTable, tableMetadata, mutationsForAddingColumnsToViews, invalidateList, clientTimeStamp, numColsDeleted, numColsDeleted, viewKey, view, ordinalPositionList, numCols, true);
    }
    return null;
}
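The ColumnFinder step above boils down to a tree search: compile the view's WHERE clause into an expression tree, then look for the dropped column's expression anywhere inside it. A generic sketch of that check with a hypothetical expression type (the real ColumnFinder is a Phoenix expression visitor):

import java.util.List;

// Hypothetical minimal expression tree, for illustration only.
interface Expr {
    List<Expr> children();
}

final class ExprSearch {
    // True if any node of the tree equals the target column expression,
    // the condition that makes the column drop UNALLOWED above.
    static boolean references(Expr root, Expr target) {
        if (root.equals(target)) {
            return true;
        }
        for (Expr child : root.children()) {
            if (references(child, target)) {
                return true;
            }
        }
        return false;
    }
}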