Use of org.apache.phoenix.schema.MetaDataClient in project phoenix by apache.
The class CloseStatementCompiler, method compile.
public MutationPlan compile(final CloseStatement close) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    final StatementContext context = new StatementContext(statement);
    // MetaDataClient performs the actual cursor close against the connection's metadata.
    final MetaDataClient client = new MetaDataClient(connection);
    return new BaseMutationPlan(context, operation) {

        @Override
        public MutationState execute() throws SQLException {
            return client.close(close);
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            return new ExplainPlan(Collections.singletonList("CLOSE CURSOR"));
        }
    };
}
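For context, a minimal sketch of how the plan returned by compile(...) is typically consumed. The helper name and its parameters are hypothetical placeholders (they are not part of the Phoenix source); only the methods visible in the snippet above are used.

// Hedged sketch: drive the MutationPlan produced by CloseStatementCompiler.compile(...).
// "compiler" and "close" are assumed to be supplied by the caller.
static MutationState closeCursor(CloseStatementCompiler compiler, CloseStatement close) throws SQLException {
    MutationPlan plan = compiler.compile(close);
    // plan.getExplainPlan() would report the single "CLOSE CURSOR" step built above;
    // execute() delegates to MetaDataClient.close(close) and returns the resulting MutationState.
    return plan.execute();
}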
Use of org.apache.phoenix.schema.MetaDataClient in project phoenix by apache.
The class MutationState, method shouldResubmitTransaction.
/**
 * Determines whether indexes were added to the mutated tables while the transaction was in progress.
 * @return true if indexes were added, false otherwise.
 * @throws SQLException
 */
private boolean shouldResubmitTransaction(Set<TableRef> txTableRefs) throws SQLException {
    if (logger.isInfoEnabled())
        logger.info("Checking for index updates as of " + getInitialWritePointer());
    MetaDataClient client = new MetaDataClient(connection);
    PMetaData cache = connection.getMetaDataCache();
    boolean addedAnyIndexes = false;
    boolean allImmutableTables = !txTableRefs.isEmpty();
    for (TableRef tableRef : txTableRefs) {
        PTable dataTable = tableRef.getTable();
        List<PTable> oldIndexes;
        PTableRef ptableRef = cache.getTableRef(dataTable.getKey());
        oldIndexes = ptableRef.getTable().getIndexes();
        // Always check at the server for a metadata change: the table may be configured to not check
        // for metadata changes, but in this case the tx manager is telling us a change is likely.
        MetaDataMutationResult result = client.updateCache(dataTable.getTenantId(), dataTable.getSchemaName().getString(), dataTable.getTableName().getString(), true);
        long timestamp = TransactionUtil.getResolvedTime(connection, result);
        tableRef.setTimeStamp(timestamp);
        PTable updatedDataTable = result.getTable();
        if (updatedDataTable == null) {
            throw new TableNotFoundException(dataTable.getSchemaName().getString(), dataTable.getTableName().getString());
        }
        allImmutableTables &= updatedDataTable.isImmutableRows();
        tableRef.setTable(updatedDataTable);
        if (!addedAnyIndexes) {
            // TODO: in theory we should do a deep equals check here, as it's possible
            // that an index was dropped and recreated with the same name but different
            // indexed/covered columns.
            addedAnyIndexes = (!oldIndexes.equals(updatedDataTable.getIndexes()));
            if (logger.isInfoEnabled())
                logger.info((addedAnyIndexes ? "Updates " : "No updates ") + "as of " + timestamp + " to " + updatedDataTable.getName().getString() + " with indexes " + updatedDataTable.getIndexes());
        }
    }
    if (logger.isInfoEnabled())
        logger.info((addedAnyIndexes ? "Updates " : "No updates ") + "to indexes as of " + getInitialWritePointer() + " over " + (allImmutableTables ? " all immutable tables" : " some mutable tables"));
    // If any indexes were added, then the conflict might be due to the DDL/DML fence.
    return allImmutableTables || addedAnyIndexes;
}
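The decision above boils down to two signals: whether every mutated table is immutable, and whether the refreshed metadata carries a different index list than the one cached at the start of the transaction. A hedged sketch distilling just that comparison follows; the helper name and its inputs are illustrative, not part of MutationState.

// Hedged sketch: the core resubmission check, extracted for clarity.
// "allImmutableTables" and "oldIndexes" correspond to the values computed in the method above.
static boolean needsResubmit(boolean allImmutableTables, List<PTable> oldIndexes, PTable refreshedTable) {
    // NOTE: as the TODO above points out, equals() on the index list will miss an index
    // that was dropped and recreated under the same name with different columns.
    boolean addedAnyIndexes = !oldIndexes.equals(refreshedTable.getIndexes());
    return allImmutableTables || addedAnyIndexes;
}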
Use of org.apache.phoenix.schema.MetaDataClient in project phoenix by apache.
The class MutationState, method validate.
private long validate(TableRef tableRef, Map<ImmutableBytesPtr, RowMutationState> rowKeyToColumnMap) throws SQLException {
    Long scn = connection.getSCN();
    MetaDataClient client = new MetaDataClient(connection);
    long serverTimeStamp = tableRef.getTimeStamp();
    // If we're auto committing, we've already validated the schema when we got the ColumnResolver,
    // so there is no need to do it again here.
    PTable table = tableRef.getTable();
    MetaDataMutationResult result = client.updateCache(table.getSchemaName().getString(), table.getTableName().getString());
    PTable resolvedTable = result.getTable();
    if (resolvedTable == null) {
        throw new TableNotFoundException(table.getSchemaName().getString(), table.getTableName().getString());
    }
    // Always update the tableRef table, as the one we've cached may be out of date since we executed
    // the UPSERT VALUES call and updated it in the cache before this.
    tableRef.setTable(resolvedTable);
    List<PTable> indexes = resolvedTable.getIndexes();
    for (PTable idxTable : indexes) {
        // Our failure mode is to block writes on index failure.
        if (idxTable.getIndexState() == PIndexState.ACTIVE && idxTable.getIndexDisableTimestamp() > 0) {
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.INDEX_FAILURE_BLOCK_WRITE)
                    .setSchemaName(table.getSchemaName().getString())
                    .setTableName(table.getTableName().getString())
                    .build().buildException();
        }
    }
    long timestamp = result.getMutationTime();
    if (timestamp != QueryConstants.UNSET_TIMESTAMP) {
        serverTimeStamp = timestamp;
        if (result.wasUpdated()) {
            List<PColumn> columns = Lists.newArrayListWithExpectedSize(table.getColumns().size());
            for (Map.Entry<ImmutableBytesPtr, RowMutationState> rowEntry : rowKeyToColumnMap.entrySet()) {
                RowMutationState valueEntry = rowEntry.getValue();
                if (valueEntry != null) {
                    Map<PColumn, byte[]> colValues = valueEntry.getColumnValues();
                    if (colValues != PRow.DELETE_MARKER) {
                        for (PColumn column : colValues.keySet()) {
                            if (!column.isDynamic())
                                columns.add(column);
                        }
                    }
                }
            }
            // Verify that every non-dynamic column we wrote still exists in the newly resolved table.
            for (PColumn column : columns) {
                if (column != null) {
                    resolvedTable.getColumnFamily(column.getFamilyName().getString()).getPColumnForColumnName(column.getName().getString());
                }
            }
        }
    }
    return scn == null
            ? (serverTimeStamp == QueryConstants.UNSET_TIMESTAMP ? HConstants.LATEST_TIMESTAMP : serverTimeStamp)
            : scn;
}
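The nested ternary at the end of validate chooses which timestamp to return. An equivalent, unrolled form is sketched below as a readability aid (the helper name is hypothetical; the constants are the ones used above).

// Hedged sketch: same timestamp-selection logic as the return expression above.
static long chooseTimestamp(Long scn, long serverTimeStamp) {
    // Prefer the connection's SCN when one is set.
    if (scn != null) {
        return scn;
    }
    // Otherwise use the resolved server timestamp, falling back to LATEST_TIMESTAMP
    // when no server timestamp was resolved.
    if (serverTimeStamp == QueryConstants.UNSET_TIMESTAMP) {
        return HConstants.LATEST_TIMESTAMP;
    }
    return serverTimeStamp;
}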