Use of org.apache.hadoop.hbase.HRegionLocation in project phoenix by apache.
The class UpsertCompiler, method setValues.
private static void setValues(byte[][] values, int[] pkSlotIndex, int[] columnIndexes,
        PTable table, Map<ImmutableBytesPtr, RowMutationState> mutation,
        PhoenixStatement statement, boolean useServerTimestamp, IndexMaintainer maintainer,
        byte[][] viewConstants, byte[] onDupKeyBytes, int numSplColumns) throws SQLException {
    Map<PColumn, byte[]> columnValues = Maps.newHashMapWithExpectedSize(columnIndexes.length);
    byte[][] pkValues = new byte[table.getPKColumns().size()][];
    // If the table uses salting, the first byte is the salting byte; set it to a placeholder
    // here and we will fill in the byte later in PRowImpl.
    if (table.getBucketNum() != null) {
        pkValues[0] = new byte[] { 0 };
    }
    for (int i = 0; i < numSplColumns; i++) {
        pkValues[i + (table.getBucketNum() != null ? 1 : 0)] = values[i];
    }
    Long rowTimestamp = null; // case when the table doesn't have a row timestamp column
    RowTimestampColInfo rowTsColInfo = new RowTimestampColInfo(useServerTimestamp, rowTimestamp);
    for (int i = 0, j = numSplColumns; j < values.length; j++, i++) {
        byte[] value = values[j];
        PColumn column = table.getColumns().get(columnIndexes[i]);
        if (SchemaUtil.isPKColumn(column)) {
            pkValues[pkSlotIndex[i]] = value;
            if (SchemaUtil.getPKPosition(table, column) == table.getRowTimestampColPos()) {
                if (!useServerTimestamp) {
                    PColumn rowTimestampCol = table.getPKColumns().get(table.getRowTimestampColPos());
                    rowTimestamp = PLong.INSTANCE.getCodec().decodeLong(value, 0, rowTimestampCol.getSortOrder());
                    if (rowTimestamp < 0) {
                        throw new IllegalDataException("Value of a column designated as ROW_TIMESTAMP cannot be less than zero");
                    }
                    rowTsColInfo = new RowTimestampColInfo(useServerTimestamp, rowTimestamp);
                }
            }
        } else {
            columnValues.put(column, value);
        }
    }
    ImmutableBytesPtr ptr = new ImmutableBytesPtr();
    table.newKey(ptr, pkValues);
    if (table.getIndexType() == IndexType.LOCAL && maintainer != null) {
        byte[] rowKey = maintainer.buildDataRowKey(ptr, viewConstants);
        HRegionLocation region = statement.getConnection().getQueryServices()
                .getTableRegionLocation(table.getParentName().getBytes(), rowKey);
        byte[] regionPrefix = region.getRegionInfo().getStartKey().length == 0
                ? new byte[region.getRegionInfo().getEndKey().length]
                : region.getRegionInfo().getStartKey();
        if (regionPrefix.length != 0) {
            ptr.set(ScanRanges.prefixKey(ptr.get(), 0, regionPrefix, regionPrefix.length));
        }
    }
    mutation.put(ptr, new RowMutationState(columnValues,
            statement.getConnection().getStatementExecutionCounter(), rowTsColInfo, onDupKeyBytes));
}
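The LOCAL index branch above prepends the owning data region's start key to the index row key so index rows sort alongside their data region. A minimal standalone sketch of that prefix step, using a hypothetical helper rather than the actual ScanRanges.prefixKey implementation:

// Hypothetical sketch of the region-prefix step above. For the first region
// the start key is empty, so a zero-filled array the length of the end key is
// used instead, keeping the prefix width consistent across regions.
static byte[] prefixWithRegionBoundary(byte[] indexRowKey, byte[] regionStartKey, byte[] regionEndKey) {
    byte[] prefix = regionStartKey.length == 0
            ? new byte[regionEndKey.length] // all-zero prefix for the first region
            : regionStartKey;
    if (prefix.length == 0) {
        return indexRowKey; // single-region table: nothing to prepend
    }
    byte[] prefixed = new byte[prefix.length + indexRowKey.length];
    System.arraycopy(prefix, 0, prefixed, 0, prefix.length);
    System.arraycopy(indexRowKey, 0, prefixed, prefix.length, indexRowKey.length);
    return prefixed;
}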
Use of org.apache.hadoop.hbase.HRegionLocation in project phoenix by apache.
The class MapReduceParallelScanGrouper, method getRegionLocationsFromManifest.
private List<HRegionLocation> getRegionLocationsFromManifest(SnapshotManifest manifest) {
    List<SnapshotProtos.SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
    Preconditions.checkNotNull(regionManifests);
    List<HRegionLocation> regionLocations = Lists.newArrayListWithCapacity(regionManifests.size());
    for (SnapshotProtos.SnapshotRegionManifest regionManifest : regionManifests) {
        regionLocations.add(new HRegionLocation(HRegionInfo.convert(regionManifest.getRegionInfo()), null));
    }
    return regionLocations;
}
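Because a snapshot's regions are read from manifest files rather than live region servers, each HRegionLocation above is constructed with a null ServerName. A hedged usage sketch (assuming the HBase 1.x API shown above) of what a consumer of the returned list can safely rely on:

// Usage sketch: only the HRegionInfo boundaries are meaningful here, since
// getServerName() is null by construction for snapshot-derived locations.
for (HRegionLocation location : getRegionLocationsFromManifest(manifest)) {
    HRegionInfo info = location.getRegionInfo();
    System.out.printf("region %s [%s, %s) server=%s%n",
            info.getEncodedName(),
            Bytes.toStringBinary(info.getStartKey()),
            Bytes.toStringBinary(info.getEndKey()),
            location.getServerName()); // null for snapshot regions
}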
Use of org.apache.hadoop.hbase.HRegionLocation in project phoenix by apache.
The class QueryCompilerTest, method testSaltTableJoin.
@Test
public void testSaltTableJoin() throws Exception {
    PhoenixConnection conn = (PhoenixConnection) DriverManager.getConnection(getUrl());
    try {
        conn.createStatement().execute("drop table if exists SALT_TEST2900");
        conn.createStatement().execute("create table SALT_TEST2900" + "(" + "id UNSIGNED_INT not null primary key," + "appId VARCHAR" + ")SALT_BUCKETS=2");
        conn.createStatement().execute("drop table if exists RIGHT_TEST2900 ");
        conn.createStatement().execute("create table RIGHT_TEST2900" + "(" + "appId VARCHAR not null primary key," + "createTime VARCHAR" + ")");
        String sql = "select * from SALT_TEST2900 a inner join RIGHT_TEST2900 b on a.appId=b.appId where a.id>=3 and a.id<=5";
        HashJoinPlan plan = (HashJoinPlan) getQueryPlan(sql, Collections.emptyList());
        ScanRanges ranges = plan.getContext().getScanRanges();
        List<HRegionLocation> regionLocations = conn.getQueryServices().getAllTableRegions(Bytes.toBytes("SALT_TEST2900"));
        for (HRegionLocation regionLocation : regionLocations) {
            assertTrue(ranges.intersectRegion(regionLocation.getRegionInfo().getStartKey(),
                    regionLocation.getRegionInfo().getEndKey(), false));
        }
    } finally {
        conn.close();
    }
}
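For context on why the test walks every region: with SALT_BUCKETS=2, Phoenix prepends a hash-derived salt byte to each row key, so rows with id between 3 and 5 can land in different regions. A sketch of that mapping, assuming Phoenix's SaltingUtil and PUnsignedInt (not part of the test above):

// Sketch: how a salted row key for SALT_TEST2900 could be formed. The salt
// byte is a hash of the key modulo the bucket count (2 = SALT_BUCKETS).
byte[] key = PUnsignedInt.INSTANCE.toBytes(3);
byte salt = SaltingUtil.getSaltingByte(key, 0, key.length, 2);
byte[] saltedKey = new byte[key.length + 1];
saltedKey[0] = salt;
System.arraycopy(key, 0, saltedKey, 1, key.length);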
Use of org.apache.hadoop.hbase.HRegionLocation in project phoenix by apache.
The class ParallelIteratorsSplitTest, method testGetSplitsWithSkipScanFilter.
@Test
public void testGetSplitsWithSkipScanFilter() throws Exception {
    byte[][] splits = new byte[][] { Ka1A, Ka1B, Ka1E, Ka1G, Ka1I, Ka2A };
    createTestTable(getUrl(), DDL, splits, null);
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), TABLE_NAME));
    TableRef tableRef = new TableRef(table);
    List<HRegionLocation> regions = pconn.getQueryServices().getAllTableRegions(tableRef.getTable().getPhysicalName().getBytes());
    List<KeyRange> ranges = getSplits(tableRef, scan, regions, scanRanges);
    assertEquals("Unexpected number of splits: " + ranges.size(), expectedSplits.size(), ranges.size());
    for (int i = 0; i < expectedSplits.size(); i++) {
        assertEquals(expectedSplits.get(i), ranges.get(i));
    }
}
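The region list feeds split generation, which intersects region boundaries with the skip-scan ranges. A rough sketch of the boundary-to-KeyRange conversion that precedes that intersection (an illustration, not the actual getSplits implementation):

// Sketch: each region's [startKey, endKey) boundary expressed as a Phoenix
// KeyRange, the form in which it can be intersected with skip-scan ranges.
List<KeyRange> regionRanges = Lists.newArrayListWithExpectedSize(regions.size());
for (HRegionLocation region : regions) {
    HRegionInfo info = region.getRegionInfo();
    regionRanges.add(KeyRange.getKeyRange(info.getStartKey(), info.getEndKey()));
}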
Use of org.apache.hadoop.hbase.HRegionLocation in project hbase by apache.
The class TestAsyncProcess, method createHConnectionWithReplicas.
private static ClusterConnection createHConnectionWithReplicas() throws IOException {
    ClusterConnection hc = createHConnectionCommon();
    setMockLocation(hc, DUMMY_BYTES_1, hrls1);
    setMockLocation(hc, DUMMY_BYTES_2, hrls2);
    setMockLocation(hc, DUMMY_BYTES_3, hrls3);
    List<HRegionLocation> locations = new ArrayList<>();
    for (HRegionLocation loc : hrls1.getRegionLocations()) {
        locations.add(loc);
    }
    for (HRegionLocation loc : hrls2.getRegionLocations()) {
        locations.add(loc);
    }
    for (HRegionLocation loc : hrls3.getRegionLocations()) {
        locations.add(loc);
    }
    Mockito.when(hc.locateRegions(Mockito.eq(DUMMY_TABLE), Mockito.anyBoolean(), Mockito.anyBoolean()))
            .thenReturn(locations);
    return hc;
}
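A hedged sketch of how a test might exercise the stubbed locateRegions call, showing that the combined list carries both primary regions and their read replicas:

// Usage sketch (assumed test context): the mock returns the concatenated
// primary + replica locations for DUMMY_TABLE regardless of the flag values.
ClusterConnection conn = createHConnectionWithReplicas();
for (HRegionLocation loc : conn.locateRegions(DUMMY_TABLE, false, false)) {
    // getReplicaId() > 0 marks a read replica of the same region
    System.out.println(loc.getRegionInfo().getReplicaId() + " -> " + loc);
}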