Use of org.apache.phoenix.query.KeyRange in project phoenix by apache.
From class TenantSpecificTablesDMLIT, method testBasicUpsertSelect2:
@Test
public void testBasicUpsertSelect2() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    // conn1 and conn2 connect as two different tenants; each should see only its own rows.
    Connection conn1 = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL, props);
    createTestTable(PHOENIX_JDBC_TENANT_SPECIFIC_URL2, TENANT_TABLE_DDL);
    Connection conn2 = DriverManager.getConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL2, props);
    try {
        conn1.setAutoCommit(false);
        conn1.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " values ('me','" + TENANT_TYPE_ID + "',1,'Cheap Sunglasses')");
        conn1.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " values ('you','" + TENANT_TYPE_ID + "',2,'Viva Las Vegas')");
        conn1.commit();
        analyzeTable(conn1, TENANT_TABLE_NAME);
        conn2.setAutoCommit(true);
        conn2.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " values ('them','" + TENANT_TYPE_ID + "',1,'Long Hair')");
        conn2.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " values ('us','" + TENANT_TYPE_ID + "',2,'Black Hat')");
        ResultSet rs = conn1.createStatement().executeQuery("select * from " + TENANT_TABLE_NAME + " where id = 1");
        assertTrue("Expected 1 row in result set", rs.next());
        assertEquals(1, rs.getInt(3));
        assertEquals("Cheap Sunglasses", rs.getString(4));
        assertFalse("Expected 1 row in result set", rs.next());
        analyzeTable(conn2, TENANT_TABLE_NAME);
        rs = conn2.createStatement().executeQuery("select * from " + TENANT_TABLE_NAME + " where id = 2");
        assertTrue("Expected 1 row in result set", rs.next());
        assertEquals(2, rs.getInt(3));
        assertEquals("Black Hat", rs.getString(4));
        assertFalse("Expected 1 row in result set", rs.next());
        analyzeTable(conn1, TENANT_TABLE_NAME);
        conn2.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " select * from " + TENANT_TABLE_NAME);
        conn2.commit();
        rs = conn2.createStatement().executeQuery("select * from " + TENANT_TABLE_NAME);
        assertTrue("Expected row in result set", rs.next());
        assertEquals(1, rs.getInt(3));
        assertEquals("Long Hair", rs.getString(4));
        assertTrue("Expected row in result set", rs.next());
        assertEquals(2, rs.getInt(3));
        assertEquals("Black Hat", rs.getString(4));
        assertFalse("Expected 2 rows total", rs.next());
        conn2.setAutoCommit(true);
        conn2.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " select 'all', tenant_type_id, id, 'Big ' || tenant_col from " + TENANT_TABLE_NAME);
        analyzeTable(conn2, TENANT_TABLE_NAME);
        rs = conn2.createStatement().executeQuery("select * from " + TENANT_TABLE_NAME);
        assertTrue("Expected row in result set", rs.next());
        assertEquals("all", rs.getString(1));
        assertEquals(TENANT_TYPE_ID, rs.getString(2));
        assertEquals(1, rs.getInt(3));
        assertEquals("Big Long Hair", rs.getString(4));
        assertTrue("Expected row in result set", rs.next());
        assertEquals("all", rs.getString(1));
        assertEquals(TENANT_TYPE_ID, rs.getString(2));
        assertEquals(2, rs.getInt(3));
        assertEquals("Big Black Hat", rs.getString(4));
        assertFalse("Expected 2 rows total", rs.next());
        // The first tenant's data must be untouched by the second tenant's upserts.
        rs = conn1.createStatement().executeQuery("select * from " + TENANT_TABLE_NAME);
        assertTrue("Expected row in result set", rs.next());
        assertEquals(1, rs.getInt(3));
        assertEquals("Cheap Sunglasses", rs.getString(4));
        assertTrue("Expected 1 row in result set", rs.next());
        assertEquals(2, rs.getInt(3));
        assertEquals("Viva Las Vegas", rs.getString(4));
        List<KeyRange> splits = getAllSplits(conn1, TENANT_TABLE_NAME);
        assertEquals(3, splits.size());
    } finally {
        conn1.close();
        conn2.close();
    }
}
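The KeyRange usage here is the closing getAllSplits call: after analyzeTable has collected stats, the query plan for the tenant table is expected to cover three parallel scan ranges. As a minimal, hedged sketch (the helper class and method are illustrative, not Phoenix API; it assumes the standard KeyRange accessors and the HBase Bytes utility), the returned splits could be inspected like this:

import java.util.List;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.query.KeyRange;

// Hypothetical helper: dump each split's bounds so the "3 splits" assertion
// above can be eyeballed. Not part of the original test.
public final class SplitPrinter {
    public static void print(List<KeyRange> splits) {
        for (KeyRange range : splits) {
            String lower = range.lowerUnbound() ? "*" : Bytes.toStringBinary(range.getLowerRange());
            String upper = range.upperUnbound() ? "*" : Bytes.toStringBinary(range.getUpperRange());
            System.out.println((range.isLowerInclusive() ? "[" : "(") + lower
                + " - " + upper + (range.isUpperInclusive() ? "]" : ")"));
        }
    }
}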
Use of org.apache.phoenix.query.KeyRange in project phoenix by apache.
From class MergeSortResultIteratorTest, method testMergeSort:
@Test
public void testMergeSort() throws Throwable {
    Tuple[] results1 = new Tuple[] { new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))) };
    Tuple[] results2 = new Tuple[] { new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))) };
    Tuple[] results3 = new Tuple[] {
        new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))),
        new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))) };
    final List<PeekingResultIterator> results = new ArrayList<PeekingResultIterator>(Arrays.asList(new PeekingResultIterator[] {
        new MaterializedResultIterator(Arrays.asList(results1)),
        new MaterializedResultIterator(Arrays.asList(results2)),
        new MaterializedResultIterator(Arrays.asList(results3)) }));
    // Rows from all three iterators, merged in row key order: A, A, B, B.
    Tuple[] expectedResults = new Tuple[] {
        new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))),
        new SingleKeyValueTuple(new KeyValue(A, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))),
        new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))),
        new SingleKeyValueTuple(new KeyValue(B, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, Bytes.toBytes(1))) };
    ResultIterators iterators = new ResultIterators() {

        @Override
        public List<PeekingResultIterator> getIterators() throws SQLException {
            return results;
        }

        @Override
        public int size() {
            return results.size();
        }

        @Override
        public void explain(List<String> planSteps) {
        }

        @Override
        public List<KeyRange> getSplits() {
            return Collections.emptyList();
        }

        @Override
        public List<List<Scan>> getScans() {
            return Collections.emptyList();
        }

        @Override
        public void close() throws SQLException {
        }
    };
    // Constructed the same way as 'iterators'; not exercised in this snippet.
    ResultIterators reverseIterators = new ResultIterators() {

        @Override
        public List<PeekingResultIterator> getIterators() throws SQLException {
            return results;
        }

        @Override
        public int size() {
            return results.size();
        }

        @Override
        public void explain(List<String> planSteps) {
        }

        @Override
        public List<KeyRange> getSplits() {
            return Collections.emptyList();
        }

        @Override
        public List<List<Scan>> getScans() {
            return Collections.emptyList();
        }

        @Override
        public void close() throws SQLException {
        }
    };
    ResultIterator scanner = new MergeSortRowKeyResultIterator(iterators);
    AssertResults.assertResults(scanner, expectedResults);
}
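MergeSortRowKeyResultIterator merges several already row-key-sorted iterators into one sorted stream, which is why the expected output interleaves the rows of results1, results2, and results3 as A, A, B, B. A minimal, hedged sketch of consuming such an iterator directly, assuming the same 'iterators' fixture as above and the usual Phoenix convention that ResultIterator.next() returns null once the stream is exhausted (the helper method name is illustrative):

// Illustrative consumption sketch; not part of the original test.
private void dumpMerged(ResultIterators iterators) throws SQLException {
    ResultIterator merged = new MergeSortRowKeyResultIterator(iterators);
    try {
        ImmutableBytesWritable keyPtr = new ImmutableBytesWritable();
        for (Tuple tuple = merged.next(); tuple != null; tuple = merged.next()) {
            tuple.getKey(keyPtr);
            // Print each merged row key in the order the iterator yields them.
            System.out.println(Bytes.toStringBinary(keyPtr.get(), keyPtr.getOffset(), keyPtr.getLength()));
        }
    } finally {
        merged.close();
    }
}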
Use of org.apache.phoenix.query.KeyRange in project phoenix by apache.
From class MetaDataEndpointImpl, method buildFunctions:
private List<PFunction> buildFunctions(List<byte[]> keys, Region region, long clientTimeStamp, boolean isReplace, List<Mutation> deleteMutationsForReplace) throws IOException, SQLException {
    // Build one point-lookup KeyRange per function row key.
    List<KeyRange> keyRanges = Lists.newArrayListWithExpectedSize(keys.size());
    for (byte[] key : keys) {
        byte[] stopKey = ByteUtil.concat(key, QueryConstants.SEPARATOR_BYTE_ARRAY);
        ByteUtil.nextKey(stopKey, stopKey.length);
        keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, false));
    }
    Scan scan = new Scan();
    scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp);
    ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges);
    scanRanges.initializeScan(scan);
    scan.setFilter(scanRanges.getSkipScanFilter());
    RegionScanner scanner = region.getScanner(scan);
    Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    List<PFunction> functions = new ArrayList<PFunction>();
    PFunction function = null;
    try {
        for (int i = 0; i < keys.size(); i++) {
            function = getFunction(scanner, isReplace, clientTimeStamp, deleteMutationsForReplace);
            if (function == null) {
                return null;
            }
            byte[] functionKey = SchemaUtil.getFunctionKey(function.getTenantId() == null ? ByteUtil.EMPTY_BYTE_ARRAY : function.getTenantId().getBytes(), Bytes.toBytes(function.getFunctionName()));
            metaDataCache.put(new FunctionBytesPtr(functionKey), function);
            functions.add(function);
        }
        return functions;
    } finally {
        scanner.close();
    }
}
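The KeyRange work above is a generic "point lookup over a set of exact row keys" pattern: each key becomes a one-row range from the key (inclusive) up to the key plus the separator byte, incremented to be exclusive; the ranges are wrapped in a ScanRanges point lookup; and the resulting skip-scan filter is attached to the HBase Scan. A hedged sketch of that pattern factored into a standalone helper follows; the class and method names are illustrative, not Phoenix API, and it uses only the calls already shown above:

import java.util.List;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.phoenix.compile.ScanRanges;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.types.PVarbinary;
import org.apache.phoenix.util.ByteUtil;

import com.google.common.collect.Lists;

// Illustrative helper (not part of Phoenix): configure a Scan so it skip-scans
// exactly the given row keys, mirroring the buildFunctions method above.
public final class PointLookupScans {
    public static Scan newPointLookupScan(List<byte[]> keys) {
        List<KeyRange> keyRanges = Lists.newArrayListWithExpectedSize(keys.size());
        for (byte[] key : keys) {
            // Stop key = key + separator, then incremented so the range ends just past this row.
            byte[] stopKey = ByteUtil.concat(key, QueryConstants.SEPARATOR_BYTE_ARRAY);
            ByteUtil.nextKey(stopKey, stopKey.length);
            keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, false));
        }
        Scan scan = new Scan();
        ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges);
        scanRanges.initializeScan(scan);
        scan.setFilter(scanRanges.getSkipScanFilter());
        return scan;
    }
}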
Use of org.apache.phoenix.query.KeyRange in project phoenix by apache.
From class MetaDataEndpointImpl, method buildSchemas:
private List<PSchema> buildSchemas(List<byte[]> keys, Region region, long clientTimeStamp, ImmutableBytesPtr cacheKey) throws IOException, SQLException {
    // Build one point-lookup KeyRange per schema row key, as in buildFunctions above.
    List<KeyRange> keyRanges = Lists.newArrayListWithExpectedSize(keys.size());
    for (byte[] key : keys) {
        byte[] stopKey = ByteUtil.concat(key, QueryConstants.SEPARATOR_BYTE_ARRAY);
        ByteUtil.nextKey(stopKey, stopKey.length);
        keyRanges.add(PVarbinary.INSTANCE.getKeyRange(key, true, stopKey, false));
    }
    Scan scan = new Scan();
    scan.setTimeRange(MIN_TABLE_TIMESTAMP, clientTimeStamp);
    ScanRanges scanRanges = ScanRanges.createPointLookup(keyRanges);
    scanRanges.initializeScan(scan);
    scan.setFilter(scanRanges.getSkipScanFilter());
    RegionScanner scanner = region.getScanner(scan);
    Cache<ImmutableBytesPtr, PMetaDataEntity> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
    List<PSchema> schemas = new ArrayList<PSchema>();
    PSchema schema = null;
    try {
        for (int i = 0; i < keys.size(); i++) {
            schema = getSchema(scanner, clientTimeStamp);
            if (schema == null) {
                return null;
            }
            metaDataCache.put(cacheKey, schema);
            schemas.add(schema);
        }
        return schemas;
    } finally {
        scanner.close();
    }
}
Use of org.apache.phoenix.query.KeyRange in project phoenix by apache.
From class ScanRanges, method getBoundSlotCount:
public int getBoundSlotCount() {
    int count = 0;
    boolean hasUnbound = false;
    int nRanges = ranges.size();
    for (int i = 0; i < nRanges && !hasUnbound; i++) {
        List<KeyRange> orRanges = ranges.get(i);
        for (KeyRange range : orRanges) {
            // A slot covering everything is not bound; stop counting before it.
            if (range == KeyRange.EVERYTHING_RANGE) {
                return count;
            }
            // Any other unbound range still counts this slot, but ends the walk afterwards.
            if (range.isUnbound()) {
                hasUnbound = true;
            }
        }
        count++;
    }
    return count;
}
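getBoundSlotCount answers "how many leading row key slots does this ScanRanges actually constrain": a slot whose OR-list contains EVERYTHING_RANGE ends the count before that slot, while a slot containing some other unbound range is counted and then stops the walk. A hedged, standalone sketch of the same counting logic over a hand-built List<List<KeyRange>> follows; the helper class and the example ranges are illustrative, not Phoenix API, and the only Phoenix calls used are the ones already shown in this listing:

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.query.KeyRange;
import org.apache.phoenix.schema.types.PVarbinary;

// Illustrative, standalone mirror of ScanRanges.getBoundSlotCount above.
public final class BoundSlotCounter {
    public static int countBoundSlots(List<List<KeyRange>> ranges) {
        int count = 0;
        boolean hasUnbound = false;
        for (int i = 0; i < ranges.size() && !hasUnbound; i++) {
            for (KeyRange range : ranges.get(i)) {
                if (range == KeyRange.EVERYTHING_RANGE) {
                    return count; // this slot (and everything after it) is unconstrained
                }
                if (range.isUnbound()) {
                    hasUnbound = true; // count this slot, then stop
                }
            }
            count++;
        }
        return count;
    }

    public static void main(String[] args) {
        // Slot 0 is bounded to ['a','b'); slot 1 matches everything, so only slot 0 is bound.
        KeyRange bounded = PVarbinary.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("b"), false);
        List<List<KeyRange>> ranges = Arrays.asList(
            Arrays.asList(bounded),
            Arrays.asList(KeyRange.EVERYTHING_RANGE));
        System.out.println(countBoundSlots(ranges)); // prints 1
    }
}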