Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project phoenix by apache.
The class MetaDataEndpointImpl, method findChildViews_deprecated:
// TODO remove this in 4.13 release
@Deprecated
private TableViewFinder findChildViews_deprecated(Region region, byte[] tenantId, PTable table, byte[] linkTypeBytes) throws IOException {
    byte[] schemaName = table.getSchemaName().getBytes();
    byte[] tableName = table.getTableName().getBytes();
    boolean isMultiTenant = table.isMultiTenant();
    Scan scan = new Scan();
    // For a non-multi-tenant table, restrict the scan to rows that start with
    // the same tenantId.
    if (!isMultiTenant) {
        byte[] startRow = ByteUtil.concat(tenantId, QueryConstants.SEPARATOR_BYTE_ARRAY);
        byte[] stopRow = ByteUtil.nextKey(startRow);
        scan.setStartRow(startRow);
        scan.setStopRow(stopRow);
    }
    SingleColumnValueFilter linkFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, CompareOp.EQUAL, linkTypeBytes);
    SingleColumnValueFilter tableTypeFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES, CompareOp.EQUAL, PTableType.VIEW.getSerializedValue().getBytes());
    tableTypeFilter.setFilterIfMissing(false);
    linkFilter.setFilterIfMissing(true);
    byte[] suffix = ByteUtil.concat(QueryConstants.SEPARATOR_BYTE_ARRAY, SchemaUtil.getPhysicalHBaseTableName(schemaName, tableName, table.isNamespaceMapped()).getBytes());
    SuffixFilter rowFilter = new SuffixFilter(suffix);
    FilterList filter = new FilterList(linkFilter, tableTypeFilter, rowFilter);
    scan.setFilter(filter);
    scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES);
    scan.addColumn(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES);
    scan.addColumn(TABLE_FAMILY_BYTES, TABLE_SEQ_NUM_BYTES);
    // Scan SYSTEM.CATALOG through an HTable (deprecated HTableInterface API) so that
    // child-view rows that live outside the current region are also seen.
    try (HTableInterface hTable = ServerUtil.getHTableForCoprocessorScan(env, region.getTableDesc().getTableName().getName())) {
        boolean allViewsInCurrentRegion = true;
        int numOfChildViews = 0;
        List<ViewInfo> viewInfoList = Lists.newArrayList();
        try (ResultScanner scanner = hTable.getScanner(scan)) {
            for (Result result = scanner.next(); (result != null); result = scanner.next()) {
                numOfChildViews++;
                ImmutableBytesWritable ptr = new ImmutableBytesWritable();
                ResultTuple resultTuple = new ResultTuple(result);
                resultTuple.getKey(ptr);
                byte[] key = ptr.copyBytes();
                if (checkTableKeyInRegion(key, region) != null) {
                    allViewsInCurrentRegion = false;
                }
                byte[][] rowKeyMetaData = new byte[3][];
                getVarChars(result.getRow(), 3, rowKeyMetaData);
                byte[] viewTenantId = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
                byte[] viewSchemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
                byte[] viewName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
                viewInfoList.add(new ViewInfo(viewTenantId, viewSchemaName, viewName));
            }
            TableViewFinder tableViewFinderResult = new TableViewFinder(viewInfoList);
            if (numOfChildViews > 0 && !allViewsInCurrentRegion) {
                tableViewFinderResult.setAllViewsNotInSingleRegion();
            }
            return tableViewFinderResult;
        }
    }
}
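The two filters above use opposite filterIfMissing settings: rows without a LINK_TYPE cell are dropped entirely, while rows without a TABLE_TYPE cell still pass through. The following is a minimal, self-contained sketch of that pattern against a hypothetical table; the table name, column family, qualifiers, and values are illustrative placeholders, not Phoenix's actual catalog constants.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterIfMissingSketch {

    public static void main(String[] args) throws Exception {
        byte[] family = Bytes.toBytes("0");                  // illustrative column family
        byte[] linkTypeQual = Bytes.toBytes("LINK_TYPE");    // illustrative qualifier
        byte[] tableTypeQual = Bytes.toBytes("TABLE_TYPE");  // illustrative qualifier

        // Rows without a LINK_TYPE cell are dropped because filterIfMissing is true.
        SingleColumnValueFilter linkFilter = new SingleColumnValueFilter(
                family, linkTypeQual, CompareOperator.EQUAL, new byte[] { 4 }); // placeholder link value
        linkFilter.setFilterIfMissing(true);

        // Rows without a TABLE_TYPE cell still pass because filterIfMissing stays false (the default).
        SingleColumnValueFilter tableTypeFilter = new SingleColumnValueFilter(
                family, tableTypeQual, CompareOperator.EQUAL, Bytes.toBytes("v")); // placeholder type value
        tableTypeFilter.setFilterIfMissing(false);

        Scan scan = new Scan();
        scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, linkFilter, tableTypeFilter));

        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                Table table = conn.getTable(TableName.valueOf("MY_CATALOG")); // illustrative table name
                ResultScanner scanner = table.getScanner(scan)) {
            for (Result result : scanner) {
                System.out.println(Bytes.toStringBinary(result.getRow()));
            }
        }
    }
}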
Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project phoenix by apache.
The class IndexHalfStoreFileReaderGenerator, method preStoreFileReaderOpen:
@Override
public StoreFileReader preStoreFileReaderOpen(ObserverContext<RegionCoprocessorEnvironment> ctx, FileSystem fs, Path p, FSDataInputStreamWrapper in, long size, CacheConfig cacheConf, Reference r, StoreFileReader reader) throws IOException {
    TableName tableName = ctx.getEnvironment().getRegion().getTableDescriptor().getTableName();
    Region region = ctx.getEnvironment().getRegion();
    RegionInfo childRegion = region.getRegionInfo();
    byte[] splitKey = null;
    if (reader == null && r != null) {
        if (!p.toString().contains(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)) {
            return reader;
        }
        PhoenixConnection conn = null;
        Table metaTable = null;
        byte[] regionStartKeyInHFile = null;
        try (Connection hbaseConn = ConnectionFactory.createConnection(ctx.getEnvironment().getConfiguration())) {
            Scan scan = CompatUtil.getScanForTableName(hbaseConn, tableName);
            SingleColumnValueFilter scvf = null;
            if (Reference.isTopFileRegion(r.getFileRegion())) {
                scvf = new SingleColumnValueFilter(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, CompareOperator.EQUAL, RegionInfoUtil.toByteArray(region.getRegionInfo()));
                scvf.setFilterIfMissing(true);
            } else {
                scvf = new SingleColumnValueFilter(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, CompareOperator.EQUAL, RegionInfoUtil.toByteArray(region.getRegionInfo()));
                scvf.setFilterIfMissing(true);
            }
            if (scvf != null)
                scan.setFilter(scvf);
            metaTable = hbaseConn.getTable(TableName.META_TABLE_NAME);
            Result result = null;
            try (ResultScanner scanner = metaTable.getScanner(scan)) {
                result = scanner.next();
            }
            if (result == null || result.isEmpty()) {
                List<RegionInfo> mergeRegions = MetaTableAccessor.getMergeRegions(ctx.getEnvironment().getConnection(), region.getRegionInfo().getRegionName());
                if (mergeRegions == null || mergeRegions.isEmpty()) {
                    return reader;
                }
                byte[] splitRow = CellUtil.cloneRow(KeyValueUtil.createKeyValueFromKey(r.getSplitKey()));
                // When splitRow equals the first merged region's start key, that region's
                // data needs no adjustment, so the same reader is returned.
                if (Bytes.compareTo(mergeRegions.get(0).getStartKey(), splitRow) == 0) {
                    if (mergeRegions.get(0).getStartKey().length == 0 && region.getRegionInfo().getEndKey().length != mergeRegions.get(0).getEndKey().length) {
                        childRegion = mergeRegions.get(0);
                        regionStartKeyInHFile = mergeRegions.get(0).getStartKey().length == 0 ? new byte[mergeRegions.get(0).getEndKey().length] : mergeRegions.get(0).getStartKey();
                    } else {
                        return reader;
                    }
                } else {
                    for (RegionInfo mergeRegion : mergeRegions.subList(1, mergeRegions.size())) {
                        if (Bytes.compareTo(mergeRegion.getStartKey(), splitRow) == 0) {
                            childRegion = mergeRegion;
                            regionStartKeyInHFile = mergeRegion.getStartKey();
                            break;
                        }
                    }
                }
                splitKey = KeyValueUtil.createFirstOnRow(region.getRegionInfo().getStartKey().length == 0 ? new byte[region.getRegionInfo().getEndKey().length] : region.getRegionInfo().getStartKey()).getKey();
            } else {
                RegionInfo parentRegion = MetaTableAccessor.getRegionInfo(result);
                regionStartKeyInHFile = parentRegion.getStartKey().length == 0 ? new byte[parentRegion.getEndKey().length] : parentRegion.getStartKey();
            }
        } finally {
            if (metaTable != null)
                metaTable.close();
        }
        try {
            conn = QueryUtil.getConnectionOnServer(ctx.getEnvironment().getConfiguration()).unwrap(PhoenixConnection.class);
            PTable dataTable = IndexUtil.getPDataTable(conn, ctx.getEnvironment().getRegion().getTableDescriptor());
            List<PTable> indexes = dataTable.getIndexes();
            Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers = new HashMap<ImmutableBytesWritable, IndexMaintainer>();
            for (PTable index : indexes) {
                if (index.getIndexType() == IndexType.LOCAL) {
                    IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable, conn);
                    indexMaintainers.put(new ImmutableBytesWritable(index.getviewIndexIdType().toBytes(index.getViewIndexId())), indexMaintainer);
                }
            }
            if (indexMaintainers.isEmpty())
                return reader;
            byte[][] viewConstants = getViewConstants(dataTable);
            return new IndexHalfStoreFileReader(fs, p, cacheConf, in, size, r, ctx.getEnvironment().getConfiguration(), indexMaintainers, viewConstants, childRegion, regionStartKeyInHFile, splitKey, childRegion.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID, new AtomicInteger(0), region.getRegionInfo());
        } catch (SQLException e) {
            throw new IOException(e);
        } finally {
            if (conn != null) {
                try {
                    conn.close();
                } catch (SQLException e) {
                    throw new IOException(e);
                }
            }
        }
    }
    return reader;
}
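This method builds its filters with the CompareOperator-based constructor, whereas the first snippet used the older, deprecated CompareOp variant. The HBase 2.x client also accepts either a raw byte[] value (which the filter wraps in a BinaryComparator) or an explicit comparator. The sketch below contrasts the two forms; the family, qualifier, and value are placeholders chosen only for illustration.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class ConstructorVariantsSketch {

    public static void main(String[] args) {
        byte[] family = Bytes.toBytes("info");       // illustrative family
        byte[] qualifier = Bytes.toBytes("splitA");  // illustrative qualifier
        byte[] value = Bytes.toBytes("expected");    // illustrative value

        // Variant 1: pass the expected value directly; the filter wraps it in a BinaryComparator.
        SingleColumnValueFilter byValue =
                new SingleColumnValueFilter(family, qualifier, CompareOperator.EQUAL, value);

        // Variant 2: pass an explicit comparator; equivalent here, but other
        // ByteArrayComparable implementations (prefix, regex, ...) can be substituted.
        SingleColumnValueFilter byComparator =
                new SingleColumnValueFilter(family, qualifier, CompareOperator.EQUAL, new BinaryComparator(value));

        // In both cases, enabling filterIfMissing turns the filter into an
        // existence-plus-equality check instead of letting column-less rows through.
        byValue.setFilterIfMissing(true);
        byComparator.setFilterIfMissing(true);
    }
}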
Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project phoenix by apache.
The class IndexTestUtil, method assertRowsForEmptyColValue:
public static void assertRowsForEmptyColValue(Connection conn, String tableName, byte[] emptyValue) throws SQLException, IOException {
    ConnectionQueryServices cqs = conn.unwrap(PhoenixConnection.class).getQueryServices();
    PTable pTable = PhoenixRuntime.getTable(conn, tableName);
    Table hTable = cqs.getTable(pTable.getPhysicalName().getBytes());
    byte[] emptyKeyValueCF = SchemaUtil.getEmptyColumnFamily(pTable);
    byte[] emptyKeyValueQualifier = EncodedColumnsUtil.getEmptyKeyValueInfo(pTable).getFirst();
    Scan scan = new Scan();
    scan.setFilter(new SingleColumnValueFilter(emptyKeyValueCF, emptyKeyValueQualifier, CompareOperator.NOT_EQUAL, new org.apache.hadoop.hbase.filter.BinaryComparator(emptyValue)));
    try (ResultScanner scanner = hTable.getScanner(scan)) {
        assertNull("There are rows in the table where the empty value is not " + Bytes.toStringBinary(emptyValue), scanner.next());
    }
}
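The assertion only reports that some offending row exists. When such a failure needs debugging, the same NOT_EQUAL filter can be reused to list the mismatching rows. The helper below is a sketch along those lines; the class, method, and parameter names are illustrative and are not part of Phoenix's test utilities.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class EmptyColumnMismatchPrinter {

    // Prints every row whose cell under family:qualifier differs from expectedValue.
    public static void printMismatches(Connection hbaseConn, String physicalTableName,
            byte[] family, byte[] qualifier, byte[] expectedValue) throws Exception {
        SingleColumnValueFilter filter = new SingleColumnValueFilter(
                family, qualifier, CompareOperator.NOT_EQUAL, new BinaryComparator(expectedValue));
        // Rows that lack the column entirely are also returned unless
        // setFilterIfMissing(true) is added; the assertion above behaves the same way.
        Scan scan = new Scan();
        scan.setFilter(filter);
        try (Table table = hbaseConn.getTable(TableName.valueOf(physicalTableName));
                ResultScanner scanner = table.getScanner(scan)) {
            for (Result result : scanner) {
                byte[] actual = result.getValue(family, qualifier);
                System.out.println(Bytes.toStringBinary(result.getRow()) + " -> "
                        + (actual == null ? "<missing>" : Bytes.toStringBinary(actual)));
            }
        }
    }
}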
Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project phoenix by apache.
The class UpgradeUtil, method moveOrCopyChildLinks:
/**
 * Move or copy child links from SYSTEM.CATALOG to SYSTEM.CHILD_LINK
 * @param oldMetaConnection caller should take care of closing the passed connection appropriately
 * @throws IOException
 */
public static void moveOrCopyChildLinks(PhoenixConnection oldMetaConnection, Map<String, String> options) throws IOException {
    long numberOfCopiedParentChildRows = 0;
    long numberOfDeletedParentChildRows = 0;
    boolean moveChildLinksDuringUpgradeEnabled = oldMetaConnection.getQueryServices().getProps().getBoolean(QueryServices.MOVE_CHILD_LINKS_DURING_UPGRADE_ENABLED, QueryServicesOptions.DEFAULT_MOVE_CHILD_LINKS_DURING_UPGRADE_ENABLED);
    try (org.apache.hadoop.hbase.client.Connection moveChildLinkConnection = getHBaseConnection(options);
            Table sysCatalogTable = moveChildLinkConnection.getTable(TableName.valueOf(SYSTEM_CATALOG_NAME))) {
        boolean pageMore = false;
        byte[] lastRowKey = null;
        do {
            Scan scan = new Scan();
            scan.addFamily(DEFAULT_COLUMN_FAMILY_BYTES);
            // Push the child-link filter down to HBase to avoid transferring rows
            // that are not child links.
            SingleColumnValueFilter childLinkFilter = new SingleColumnValueFilter(DEFAULT_COLUMN_FAMILY_BYTES, LINK_TYPE_BYTES, CompareFilter.CompareOp.EQUAL, new byte[] { PTable.LinkType.CHILD_TABLE.getSerializedValue() });
            childLinkFilter.setFilterIfMissing(true);
            // Limit the number of records returned per scan page
            PageFilter pf = new PageFilter(DEFAULT_SCAN_PAGE_SIZE);
            scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL, pf, childLinkFilter));
            if (pageMore) {
                scan.withStartRow(lastRowKey, false);
            }
            // Collect the row keys to process them in batch
            try (ResultScanner scanner = sysCatalogTable.getScanner(scan)) {
                int count = 0;
                List<byte[]> rowKeys = new ArrayList<>();
                List<Put> puts = new ArrayList<>();
                for (Result rr = scanner.next(); rr != null; rr = scanner.next()) {
                    count++;
                    lastRowKey = rr.getRow();
                    byte[] tmpKey = new byte[lastRowKey.length];
                    System.arraycopy(lastRowKey, 0, tmpKey, 0, tmpKey.length);
                    long rowTS = rr.rawCells()[0].getTimestamp();
                    rowKeys.add(tmpKey);
                    Put put = new Put(tmpKey);
                    put.addColumn(DEFAULT_COLUMN_FAMILY_BYTES, EMPTY_COLUMN_BYTES, rowTS, EMPTY_COLUMN_VALUE_BYTES);
                    put.addColumn(DEFAULT_COLUMN_FAMILY_BYTES, LINK_TYPE_BYTES, rowTS, LINK_ROW);
                    puts.add(put);
                }
                if (puts.size() > 0) {
                    Object[] putResults = new Object[puts.size()];
                    try (Table childLinkTable = moveChildLinkConnection.getTable(TableName.valueOf(SYSTEM_CHILD_LINK_NAME))) {
                        // Process a batch of child links
                        childLinkTable.batch(puts, putResults);
                        // If moving child links is enabled instead of copying, delete the rows from SYSTEM.CATALOG.
                        if (moveChildLinksDuringUpgradeEnabled) {
                            List<Delete> deletes = Lists.newArrayList();
                            for (int i = 0; i < putResults.length; i++) {
                                if (java.util.Objects.nonNull(putResults[i])) {
                                    deletes.add(new Delete(rowKeys.get(i)));
                                }
                            }
                            numberOfCopiedParentChildRows += deletes.size();
                            Object[] deleteResults = new Object[deletes.size()];
                            sysCatalogTable.batch(deletes, deleteResults);
                            int numDeletes = 0;
                            for (int i = 0; i < deleteResults.length; i++) {
                                if (java.util.Objects.nonNull(deleteResults[i])) {
                                    numDeletes++;
                                }
                            }
                            numberOfDeletedParentChildRows += numDeletes;
                        } else {
                            int numCopied = 0;
                            for (int i = 0; i < putResults.length; i++) {
                                if (java.util.Objects.nonNull(putResults[i])) {
                                    numCopied++;
                                }
                            }
                            numberOfCopiedParentChildRows += numCopied;
                        }
                    } catch (Exception e) {
                        LOGGER.error(String.format("Failed adding child link batch from %s to %s with Exception :", SYSTEM_CATALOG_NAME, SYSTEM_CHILD_LINK_NAME), e);
                    }
                }
                pageMore = count != 0;
                LOGGER.info(String.format("moveOrCopyChildLinks in progress => numberOfCopiedParentChildRows: %d " + "numberOfDeletedParentChildRows: %d", numberOfCopiedParentChildRows, numberOfDeletedParentChildRows));
            }
        } while (pageMore);
    } catch (IOException ioe) {
        LOGGER.error(String.format("Failed adding child link rows from %s to %s with Exception :", SYSTEM_CATALOG_NAME, SYSTEM_CHILD_LINK_NAME), ioe);
        throw ioe;
    }
    LOGGER.info(String.format("Finished moving/copying child link rows from %s to %s ", SYSTEM_CATALOG_NAME, SYSTEM_CHILD_LINK_NAME));
}
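The paging idiom above combines a PageFilter with the pushed-down SingleColumnValueFilter and resumes each page with withStartRow(lastRowKey, false), so the last processed row is not read twice. Note that PageFilter limits rows per region server, so a page may slightly exceed the requested size on multi-region tables. Below is a standalone sketch of the same loop; all table, family, and qualifier names are placeholders, and the page size is an explicit parameter rather than Phoenix's DEFAULT_SCAN_PAGE_SIZE constant.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.PageFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;

public class PagedFilteredScanSketch {

    // Scans a table in pages of roughly pageSize rows, keeping only rows whose
    // family:qualifier cell equals expectedValue.
    public static List<byte[]> scanMatchingRowKeys(Connection conn, String tableName,
            byte[] family, byte[] qualifier, byte[] expectedValue, int pageSize) throws Exception {
        List<byte[]> rowKeys = new ArrayList<>();
        byte[] lastRowKey = null;
        boolean pageMore = false;
        try (Table table = conn.getTable(TableName.valueOf(tableName))) {
            do {
                SingleColumnValueFilter valueFilter = new SingleColumnValueFilter(
                        family, qualifier, CompareOperator.EQUAL, expectedValue);
                valueFilter.setFilterIfMissing(true);
                Scan scan = new Scan();
                scan.addFamily(family);
                // PageFilter caps rows per region server, so treat it as a hint, not a hard limit.
                scan.setFilter(new FilterList(FilterList.Operator.MUST_PASS_ALL,
                        new PageFilter(pageSize), valueFilter));
                if (pageMore) {
                    // Resume after the last row key of the previous page (exclusive).
                    scan.withStartRow(lastRowKey, false);
                }
                int count = 0;
                try (ResultScanner scanner = table.getScanner(scan)) {
                    for (Result result : scanner) {
                        count++;
                        lastRowKey = result.getRow();
                        rowKeys.add(lastRowKey.clone());
                    }
                }
                // Keep paging until a page comes back empty.
                pageMore = count != 0;
            } while (pageMore);
        }
        return rowKeys;
    }
}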
Use of org.apache.hadoop.hbase.filter.SingleColumnValueFilter in project phoenix by apache.
The class ViewUtil, method hasChildViews:
/**
 * Check metadata to find if a given table/view has any immediate child views. Note that this
 * is not resilient to orphan {@code parent->child } links.
 * @param sysCatOrsysChildLink For older (pre-4.15.0) clients, we look for child links inside
 *                             SYSTEM.CATALOG, otherwise we look for them inside
 *                             SYSTEM.CHILD_LINK
 * @param tenantId tenantId
 * @param schemaName table schema name
 * @param tableName table name
 * @param timestamp passed client-side timestamp
 * @return true if the given table has at least one child view
 * @throws IOException thrown if there is an error scanning SYSTEM.CHILD_LINK or SYSTEM.CATALOG
 */
public static boolean hasChildViews(Table sysCatOrsysChildLink, byte[] tenantId, byte[] schemaName, byte[] tableName, long timestamp) throws IOException {
    byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
    Scan scan = MetaDataUtil.newTableRowsScan(key, MetaDataProtocol.MIN_TABLE_TIMESTAMP, timestamp);
    SingleColumnValueFilter linkFilter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES, CompareFilter.CompareOp.EQUAL, LinkType.CHILD_TABLE.getSerializedValueAsByteArray()) {

        // if we found a row with the CHILD_TABLE link type we are done and can
        // terminate the scan
        @Override
        public boolean filterAllRemaining() {
            return matchedColumn;
        }
    };
    linkFilter.setFilterIfMissing(true);
    scan.setFilter(linkFilter);
    scan.addColumn(TABLE_FAMILY_BYTES, LINK_TYPE_BYTES);
    try (ResultScanner scanner = sysCatOrsysChildLink.getScanner(scan)) {
        Result result = scanner.next();
        return result != null;
    }
}
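The anonymous subclass above overrides filterAllRemaining() so the scan stops as soon as the filter's protected matchedColumn flag is set, turning the scan into an existence check. A minimal sketch of that early-termination pattern follows; the family, qualifier, and value parameters are placeholders.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;

public class ExistenceCheckFilterSketch {

    // Builds a scan that ends as soon as one row with the expected column value has been seen.
    public static Scan buildExistenceScan(byte[] family, byte[] qualifier, byte[] value) {
        SingleColumnValueFilter filter =
                new SingleColumnValueFilter(family, qualifier, CompareOperator.EQUAL, value) {
                    @Override
                    public boolean filterAllRemaining() {
                        // matchedColumn is a protected field set once the target column
                        // matched; returning true ends the scan early.
                        return matchedColumn;
                    }
                };
        filter.setFilterIfMissing(true);
        Scan scan = new Scan();
        scan.addColumn(family, qualifier);
        scan.setFilter(filter);
        return scan;
    }
}

One caveat: a custom filter subclass is serialized by class name when the scan is sent to the region servers, so the subclass must be present on the servers' classpath. That holds for the Phoenix code above because it ships in the server-side jar.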