Use of org.apache.phoenix.parse.PSchema in project phoenix by apache.
Class MetaDataEndpointImpl, method getSchema.
private PSchema getSchema(RegionScanner scanner, long clientTimeStamp) throws IOException, SQLException {
    List<Cell> results = Lists.newArrayList();
    scanner.next(results);
    if (results.isEmpty()) {
        return null;
    }
    // The schema row key is <tenant ID><separator><schema name>; decode both
    // components out of the first cell's row key.
    Cell keyValue = results.get(0);
    byte[] keyBuffer = keyValue.getRowArray();
    int keyLength = keyValue.getRowLength();
    int keyOffset = keyValue.getRowOffset();
    PName tenantId = newPName(keyBuffer, keyOffset, keyLength);
    int tenantIdLength = (tenantId == null) ? 0 : tenantId.getBytes().length;
    if (tenantIdLength == 0) {
        // An empty tenant ID component means the global (default) tenant.
        tenantId = null;
    }
    PName schemaName = newPName(keyBuffer, keyOffset + tenantIdLength + 1, keyLength - tenantIdLength - 1);
    long timeStamp = keyValue.getTimestamp();
    return new PSchema(schemaName.getString(), timeStamp);
}
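For reference, here is a minimal, self-contained sketch of the row-key layout this method decodes. It assumes, as Phoenix's key encoding does, a zero-byte separator between key components and an empty tenant-ID component for the global tenant; the class name and sample values are illustrative, not Phoenix APIs.

public class SchemaRowKeySketch {
    public static void main(String[] args) {
        // Simulated schema row key: empty tenant ID, zero-byte separator,
        // then the schema name bytes.
        byte[] rowKey = { 0, 'M', 'Y', '_', 'S', 'C', 'H', 'E', 'M', 'A' };
        int sep = 0;
        while (sep < rowKey.length && rowKey[sep] != 0) {
            sep++;
        }
        String tenantId = new String(rowKey, 0, sep); // "" means global tenant
        String schemaName = new String(rowKey, sep + 1, rowKey.length - sep - 1);
        System.out.println("tenant=[" + tenantId + "] schema=[" + schemaName + "]");
    }
}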
Use of org.apache.phoenix.parse.PSchema in project phoenix by apache.
Class ParallelIteratorsSplitTest, method getSplits.
private static List<KeyRange> getSplits(final TableRef tableRef, final Scan scan, final List<HRegionLocation> regions, final ScanRanges scanRanges) throws SQLException {
    final List<TableRef> tableRefs = Collections.singletonList(tableRef);
    // Stub resolver: the split computation only needs getTables(); the
    // remaining methods are empty, null, or unsupported.
    ColumnResolver resolver = new ColumnResolver() {

        @Override
        public List<PFunction> getFunctions() {
            return Collections.emptyList();
        }

        @Override
        public List<TableRef> getTables() {
            return tableRefs;
        }

        @Override
        public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public PFunction resolveFunction(String functionName) throws SQLException {
            throw new UnsupportedOperationException();
        }

        @Override
        public boolean hasUDFs() {
            return false;
        }

        @Override
        public PSchema resolveSchema(String schemaName) throws SQLException {
            return null;
        }

        @Override
        public List<PSchema> getSchemas() {
            return null;
        }
    };
    PhoenixConnection connection = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    final PhoenixStatement statement = new PhoenixStatement(connection);
    final StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
    context.setScanRanges(scanRanges);
    // Minimal QueryPlan stub: supplies just the context, table, and default
    // values that ParallelIterators needs to compute its splits.
    ParallelIterators parallelIterators = new ParallelIterators(new QueryPlan() {

        private final Set<TableRef> tableRefs = ImmutableSet.of(tableRef);

        @Override
        public StatementContext getContext() {
            return context;
        }

        @Override
        public ParameterMetaData getParameterMetaData() {
            return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA;
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            return ExplainPlan.EMPTY_PLAN;
        }

        @Override
        public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }

        @Override
        public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }

        @Override
        public ResultIterator iterator() throws SQLException {
            return ResultIterator.EMPTY_ITERATOR;
        }

        @Override
        public long getEstimatedSize() {
            return 0;
        }

        @Override
        public Set<TableRef> getSourceRefs() {
            return tableRefs;
        }

        @Override
        public TableRef getTableRef() {
            return tableRef;
        }

        @Override
        public RowProjector getProjector() {
            return RowProjector.EMPTY_PROJECTOR;
        }

        @Override
        public Integer getLimit() {
            return null;
        }

        @Override
        public Integer getOffset() {
            return null;
        }

        @Override
        public OrderBy getOrderBy() {
            return OrderBy.EMPTY_ORDER_BY;
        }

        @Override
        public GroupBy getGroupBy() {
            return GroupBy.EMPTY_GROUP_BY;
        }

        @Override
        public List<KeyRange> getSplits() {
            return null;
        }

        @Override
        public FilterableStatement getStatement() {
            return SelectStatement.SELECT_ONE;
        }

        @Override
        public boolean isDegenerate() {
            return false;
        }

        @Override
        public boolean isRowKeyOrdered() {
            return true;
        }

        @Override
        public List<List<Scan>> getScans() {
            return null;
        }

        @Override
        public Operation getOperation() {
            return Operation.QUERY;
        }

        @Override
        public boolean useRoundRobinIterator() {
            return false;
        }

        @Override
        public Long getEstimatedRowsToScan() {
            return null;
        }

        @Override
        public Long getEstimatedBytesToScan() {
            return null;
        }
    }, null, new SpoolingResultIterator.SpoolingResultIteratorFactory(context.getConnection().getQueryServices()), context.getScan(), false);
    List<KeyRange> keyRanges = parallelIterators.getSplits();
    return keyRanges;
}
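A test case might drive this helper roughly as follows. This is a hedged sketch only: makeTableRef(), makeRegions(), and makeScanRanges() stand in for the fixture builders a real test would supply and are not Phoenix APIs.

List<HRegionLocation> regions = makeRegions();    // hypothetical fixture builder
ScanRanges scanRanges = makeScanRanges();         // hypothetical fixture builder
List<KeyRange> splits = getSplits(makeTableRef(), new Scan(), regions, scanRanges);
for (KeyRange range : splits) {
    // Print each split's bounds for inspection.
    System.out.println(Bytes.toStringBinary(range.getLowerRange()) + " .. "
            + Bytes.toStringBinary(range.getUpperRange()));
}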
Use of org.apache.phoenix.parse.PSchema in project phoenix by apache.
Class MetaDataClient, method dropSchema.
public MutationState dropSchema(DropSchemaStatement executableDropSchemaStatement) throws SQLException {
    connection.rollback();
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        PSchema schema = new PSchema(executableDropSchemaStatement.getSchemaName());
        String schemaName = schema.getSchemaName();
        boolean ifExists = executableDropSchemaStatement.ifExists();
        byte[] key = SchemaUtil.getSchemaKey(schemaName);
        Long scn = connection.getSCN();
        long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
        List<Mutation> schemaMetaData = Lists.newArrayListWithExpectedSize(2);
        // Delete the schema row at the client timestamp.
        Delete schemaDelete = new Delete(key, clientTimeStamp);
        schemaMetaData.add(schemaDelete);
        MetaDataMutationResult result = connection.getQueryServices().dropSchema(schemaMetaData, schemaName);
        MutationCode code = result.getMutationCode();
        schema = result.getSchema();
        switch (code) {
            case SCHEMA_NOT_FOUND:
                if (!ifExists) {
                    throw new SchemaNotFoundException(schemaName);
                }
                break;
            case NEWER_SCHEMA_FOUND:
                throw new NewerSchemaAlreadyExistsException(schemaName);
            case TABLES_EXIST_ON_SCHEMA:
                throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_SCHEMA).setSchemaName(schemaName).build().buildException();
            default:
                // Success: evict the schema from the client-side metadata cache.
                connection.removeSchema(schema, result.getMutationTime());
                break;
        }
        return new MutationState(0, 0, connection);
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
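From the application side, this code path is reached through a plain JDBC statement. A minimal sketch, assuming a local Phoenix URL and that namespace mapping (phoenix.schema.isNamespaceMappingEnabled) is turned on:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;

public class DropSchemaExample {
    public static void main(String[] args) throws SQLException {
        // Assumes a locally reachable Phoenix/HBase cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // Fails with CANNOT_MUTATE_SCHEMA while tables still exist in the schema.
            stmt.execute("DROP SCHEMA IF EXISTS MY_SCHEMA");
        }
    }
}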
Use of org.apache.phoenix.parse.PSchema in project phoenix by apache.
Class MetaDataClient, method updateCache.
public MetaDataMutationResult updateCache(String schemaName, boolean alwaysHitServer) throws SQLException {
    long clientTimeStamp = getClientTimeStamp();
    PSchema schema = null;
    try {
        schema = connection.getMetaDataCache().getSchema(new PTableKey(null, schemaName));
        if (schema != null && !alwaysHitServer) {
            // Cache hit and the caller did not force a server round trip.
            return new MetaDataMutationResult(MutationCode.SCHEMA_ALREADY_EXISTS, schema, QueryConstants.UNSET_TIMESTAMP);
        }
    } catch (SchemaNotFoundException e) {
        // Not cached locally; fall through to the server lookup.
    }
    MetaDataMutationResult result = connection.getQueryServices().getSchema(schemaName, clientTimeStamp);
    return result;
}
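The cache-then-server flow above is a classic read-through lookup. A generic sketch of the same pattern, with all names illustrative rather than Phoenix APIs:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

class ReadThroughCache<K, V> {
    private final Map<K, V> cache = new ConcurrentHashMap<>();
    private final Function<K, V> serverLookup;

    ReadThroughCache(Function<K, V> serverLookup) {
        this.serverLookup = serverLookup;
    }

    V get(K key, boolean alwaysHitServer) {
        V cached = cache.get(key);
        if (cached != null && !alwaysHitServer) {
            return cached; // fast path: local metadata cache
        }
        V fresh = serverLookup.apply(key); // round trip to the server
        if (fresh != null) {
            cache.put(key, fresh);
        }
        return fresh;
    }
}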
Use of org.apache.phoenix.parse.PSchema in project phoenix by apache.
Class MetaDataEndpointImpl, method buildDeletedSchema.
private PSchema buildDeletedSchema(byte[] key, ImmutableBytesPtr cacheKey, Region region, long clientTimeStamp) throws IOException {
    if (clientTimeStamp == HConstants.LATEST_TIMESTAMP) {
        return null;
    }
    // Raw scan so that delete markers written after clientTimeStamp are visible.
    Scan scan = MetaDataUtil.newTableRowsScan(key, clientTimeStamp, HConstants.LATEST_TIMESTAMP);
    scan.setFilter(new FirstKeyOnlyFilter());
    scan.setRaw(true);
    List<Cell> results = Lists.<Cell>newArrayList();
    try (RegionScanner scanner = region.getScanner(scan)) {
        scanner.next(results);
    }
    // HBase ignores the time range on a raw scan (HBASE-7362)
    if (!results.isEmpty() && results.get(0).getTimestamp() > clientTimeStamp) {
        Cell kv = results.get(0);
        if (kv.getTypeByte() == Type.Delete.getCode()) {
            // Cache a "deleted schema" marker so later lookups at this
            // timestamp can short-circuit without rescanning.
            Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            PSchema schema = newDeletedSchemaMarker(kv.getTimestamp());
            metaDataCache.put(cacheKey, schema);
            return schema;
        }
    }
    return null;
}
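The delete-marker check relies on standard HBase raw-scan behavior. A hedged client-side sketch of the same mechanism, assuming an already-open Table handle and an illustrative rowKey; this is not the server-side Phoenix code path above:

Scan scan = new Scan(rowKey);             // start at the row of interest (hypothetical key)
scan.setRaw(true);                        // surface delete markers too
scan.setFilter(new FirstKeyOnlyFilter()); // the newest cell per row suffices
try (ResultScanner scanner = table.getScanner(scan)) {
    Result first = scanner.next();
    if (first != null && !first.isEmpty()) {
        Cell cell = first.rawCells()[0];
        if (cell.getTypeByte() == KeyValue.Type.Delete.getCode()) {
            System.out.println("row deleted at ts=" + cell.getTimestamp());
        }
    }
}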