use of org.locationtech.geowave.datastore.hbase.filters.HBaseDistributableFilter in project geowave by locationtech.
the class HBaseReader method addDistFilter.
private void addDistFilter(final ReaderParams<T> params, final FilterList filterList) {
  final HBaseDistributableFilter hbdFilter = new HBaseDistributableFilter();
  if (wholeRowEncoding) {
    hbdFilter.setWholeRowFilter(true);
  }
  hbdFilter.setPartitionKeyLength(partitionKeyLength);

  final List<QueryFilter> distFilters = Lists.newArrayList();
  distFilters.add(params.getFilter());

  hbdFilter.init(
      distFilters,
      params.getIndex().getIndexModel(),
      params.getAdditionalAuthorizations());

  filterList.addFilter(hbdFilter);
}
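For context, a minimal sketch of how a FilterList assembled this way might be attached to a client-side Scan. The surrounding class and the Table parameter are hypothetical illustrations, not part of HBaseReader; only standard HBase client calls are used.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.FilterList;

public class FilterListScanSketch {
  // "table" is assumed to be an open Table for a GeoWave index table, and
  // "filterList" to already contain the HBaseDistributableFilter appended
  // by addDistFilter
  static void scanWithFilters(final Table table, final FilterList filterList)
      throws IOException {
    final Scan scan = new Scan();
    scan.setFilter(filterList);
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (final Result result : scanner) {
        // every Result here has already passed the server-side
        // distributable filter
      }
    }
  }
}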
use of org.locationtech.geowave.datastore.hbase.filters.HBaseDistributableFilter in project geowave by locationtech.
the class AggregationEndpoint method aggregate.
@Override
public void aggregate(
    final RpcController controller,
    final AggregationProtosServer.AggregationRequest request,
    final RpcCallback<AggregationProtosServer.AggregationResponse> done) {
  FilterList filterList = null;
  InternalDataAdapter<?> dataAdapter = null;
  AdapterToIndexMapping indexMapping = null;
  Short internalAdapterId = null;
  AggregationProtosServer.AggregationResponse response = null;
  ByteString value = ByteString.EMPTY;

  // Get the aggregation type
  final Aggregation aggregation =
      (Aggregation) URLClassloaderUtils.fromClassId(
          request.getAggregation().getClassId().toByteArray());

  // Handle aggregation params
  if (request.getAggregation().hasParams()) {
    final byte[] parameterBytes = request.getAggregation().getParams().toByteArray();
    final Persistable aggregationParams = URLClassloaderUtils.fromBinary(parameterBytes);
    aggregation.setParameters(aggregationParams);
  }

  HBaseDistributableFilter hdFilter = null;
  if (aggregation != null) {
    if (request.hasRangeFilter()) {
      final byte[] rfilterBytes = request.getRangeFilter().toByteArray();
      try {
        final MultiRowRangeFilter rangeFilter = MultiRowRangeFilter.parseFrom(rfilterBytes);
        filterList = new FilterList(rangeFilter);
      } catch (final Exception e) {
        LOGGER.error("Error creating range filter.", e);
      }
    } else {
      LOGGER.error("Input range filter is undefined.");
    }

    if (request.hasNumericIndexStrategyFilter()) {
      final byte[] nisFilterBytes = request.getNumericIndexStrategyFilter().toByteArray();
      try {
        final HBaseNumericIndexStrategyFilter numericIndexStrategyFilter =
            HBaseNumericIndexStrategyFilter.parseFrom(nisFilterBytes);
        if (filterList == null) {
          filterList = new FilterList(numericIndexStrategyFilter);
        } else {
          filterList.addFilter(numericIndexStrategyFilter);
        }
      } catch (final Exception e) {
        LOGGER.error("Error creating index strategy filter.", e);
      }
    }

    try {
      // Add the distributable filter if requested; it has to be last
      // in the filter list for the dedupe filter to work correctly
      if (request.hasModel()) {
        hdFilter = new HBaseDistributableFilter();
        if (request.hasWholeRowFilter()) {
          hdFilter.setWholeRowFilter(request.getWholeRowFilter());
        }
        if (request.hasPartitionKeyLength()) {
          hdFilter.setPartitionKeyLength(request.getPartitionKeyLength());
        }
        final byte[] filterBytes;
        if (request.hasFilter()) {
          filterBytes = request.getFilter().toByteArray();
        } else {
          filterBytes = null;
        }
        final byte[] modelBytes = request.getModel().toByteArray();
        if (hdFilter.init(filterBytes, modelBytes)) {
          if (filterList == null) {
            filterList = new FilterList(hdFilter);
          } else {
            filterList.addFilter(hdFilter);
          }
        } else {
          LOGGER.error("Error creating distributable filter.");
        }
      } else {
        LOGGER.error("Input distributable filter is undefined.");
      }
    } catch (final Exception e) {
      LOGGER.error("Error creating distributable filter.", e);
    }

    if (request.hasAdapter()) {
      final byte[] adapterBytes = request.getAdapter().toByteArray();
      dataAdapter = (InternalDataAdapter<?>) URLClassloaderUtils.fromBinary(adapterBytes);
    }
    if (request.hasInternalAdapterId()) {
      final byte[] adapterIdBytes = request.getInternalAdapterId().toByteArray();
      internalAdapterId = ByteArrayUtils.byteArrayToShort(adapterIdBytes);
    }
    if (request.hasIndexMapping()) {
      final byte[] mappingBytes = request.getIndexMapping().toByteArray();
      indexMapping = (AdapterToIndexMapping) URLClassloaderUtils.fromBinary(mappingBytes);
    }

    final String[] authorizations;
    if (request.hasVisLabels()) {
      final byte[] visBytes = request.getVisLabels().toByteArray();
      if (visBytes.length > 0) {
        authorizations = StringUtils.stringsFromBinary(visBytes);
      } else {
        authorizations = null;
      }
    } else {
      authorizations = null;
    }

    try {
      final Object result =
          getValue(
              aggregation,
              filterList,
              dataAdapter,
              indexMapping,
              internalAdapterId,
              hdFilter,
              request.getBlockCaching(),
              request.getCacheSize(),
              authorizations);
      URLClassloaderUtils.initClassLoader();
      final byte[] bvalue = aggregation.resultToBinary(result);
      value = ByteString.copyFrom(bvalue);
    } catch (final IOException ioe) {
      LOGGER.error("Error during aggregation.", ioe);
      /*
       * ResponseConverter.setControllerException(controller, ioe);
       */
    } catch (final Exception e) {
      LOGGER.error("Error during aggregation.", e);
    }
  }

  response = AggregationProtosServer.AggregationResponse.newBuilder().setValue(value).build();
  done.run(response);
}
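The endpoint serializes its result with resultToBinary and ships it back as a ByteString, so a caller can recover a typed result by round-tripping through the same Aggregation instance. A hedged sketch of that client-side decoding, assuming GeoWave's Aggregation interface exposes the matching resultFromBinary; the class and method names here are illustrative only.

import com.google.protobuf.ByteString;
import org.locationtech.geowave.core.store.api.Aggregation;

public class AggregationResponseDecodeSketch {
  // decode the value field of an AggregationResponse; returns null when the
  // endpoint reported a failure (it sends ByteString.EMPTY in that case)
  static <R> R decodeResult(final Aggregation<?, R, ?> aggregation, final ByteString value) {
    if (value.isEmpty()) {
      return null;
    }
    return aggregation.resultFromBinary(value.toByteArray());
  }
}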
use of org.locationtech.geowave.datastore.hbase.filters.HBaseDistributableFilter in project geowave by locationtech.
the class HBaseBulkDeleteEndpoint method delete.
@Override
public void delete(
    final RpcController controller,
    final BulkDeleteRequest request,
    final RpcCallback<BulkDeleteResponse> done) {
  long totalRowsDeleted = 0L;
  long totalVersionsDeleted = 0L;
  FilterList filterList = null;
  final List<byte[]> adapterIds = new ArrayList<>();

  Long timestamp = null;
  if (request.hasTimestamp()) {
    timestamp = request.getTimestamp();
  }
  final BulkDeleteType deleteType = request.getDeleteType();

  /**
   * Extract the filters from the BulkDeleteRequest
   */
  HBaseDistributableFilter hdFilter = null;
  if (request.hasRangeFilter()) {
    final byte[] rfilterBytes = request.getRangeFilter().toByteArray();
    try {
      final MultiRowRangeFilter rangeFilter = MultiRowRangeFilter.parseFrom(rfilterBytes);
      filterList = new FilterList(rangeFilter);
    } catch (final Exception e) {
      LOGGER.error("Error creating range filter.", e);
    }
  } else {
    LOGGER.error("Input range filter is undefined.");
  }

  if (request.hasNumericIndexStrategyFilter()) {
    final byte[] nisFilterBytes = request.getNumericIndexStrategyFilter().toByteArray();
    try {
      final HBaseNumericIndexStrategyFilter numericIndexStrategyFilter =
          HBaseNumericIndexStrategyFilter.parseFrom(nisFilterBytes);
      if (filterList == null) {
        filterList = new FilterList(numericIndexStrategyFilter);
      } else {
        filterList.addFilter(numericIndexStrategyFilter);
      }
    } catch (final Exception e) {
      LOGGER.error("Error creating index strategy filter.", e);
    }
  }

  try {
    // Add the distributable filter if requested; it has to be last
    // in the filter list for the dedupe filter to work correctly
    if (request.hasModel()) {
      hdFilter = new HBaseDistributableFilter();
      final byte[] filterBytes;
      if (request.hasFilter()) {
        filterBytes = request.getFilter().toByteArray();
      } else {
        filterBytes = null;
      }
      final byte[] modelBytes = request.getModel().toByteArray();
      if (hdFilter.init(filterBytes, modelBytes)) {
        if (filterList == null) {
          filterList = new FilterList(hdFilter);
        } else {
          filterList.addFilter(hdFilter);
        }
      } else {
        LOGGER.error("Error creating distributable filter.");
      }
    } else {
      LOGGER.error("Input distributable filter is undefined.");
    }
  } catch (final Exception e) {
    LOGGER.error("Error creating distributable filter.", e);
  }

  if (request.hasAdapterIds()) {
    // adapter IDs arrive as consecutive shorts packed into one byte array;
    // each one maps to a column family on the GeoWave index table
    final ByteBuffer buf = ByteBuffer.wrap(request.getAdapterIds().toByteArray());
    adapterIds.clear();
    while (buf.hasRemaining()) {
      final short adapterId = buf.getShort();
      adapterIds.add(StringUtils.stringToBinary(ByteArrayUtils.shortToString(adapterId)));
    }
  }

  /**
   * Start the actual delete process
   */
  RegionScanner scanner = null;
  try {
    final Scan scan = new Scan();
    scan.setFilter(filterList);
    if (!adapterIds.isEmpty()) {
      for (final byte[] adapterId : adapterIds) {
        scan.addFamily(adapterId);
      }
    }
    final Region region = env.getRegion();
    scanner = region.getScanner(scan);
    boolean hasMore = true;
    final int rowBatchSize = request.getRowBatchSize();
    while (hasMore) {
      final List<List<Cell>> deleteRows = new ArrayList<>(rowBatchSize);
      for (int i = 0; i < rowBatchSize; i++) {
        final List<Cell> results = new ArrayList<>();
        hasMore = scanner.next(results);
        if (results.size() > 0) {
          deleteRows.add(results);
        }
        if (!hasMore) {
          // There are no more rows.
          break;
        }
      }
      if (deleteRows.size() > 0) {
        final Mutation[] deleteArr = new Mutation[deleteRows.size()];
        int i = 0;
        for (final List<Cell> deleteRow : deleteRows) {
          deleteArr[i++] = createDeleteMutation(deleteRow, deleteType, timestamp);
        }
        final OperationStatus[] opStatus = batchMutate(region, deleteArr);
        for (i = 0; i < opStatus.length; i++) {
          if (opStatus[i].getOperationStatusCode() != OperationStatusCode.SUCCESS) {
            break;
          }
          totalRowsDeleted++;
          if (deleteType == BulkDeleteType.VERSION) {
            final byte[] versionsDeleted = deleteArr[i].getAttribute(NO_OF_VERSIONS_TO_DELETE);
            if (versionsDeleted != null) {
              totalVersionsDeleted += Bytes.toInt(versionsDeleted);
            }
          }
        }
      }
    }
  } catch (final IOException e) {
    LOGGER.error("Unable to delete rows", e);
  } finally {
    if (scanner != null) {
      try {
        scanner.close();
      } catch (final IOException ioe) {
        LOGGER.error("Error during bulk delete in HBase.", ioe);
      }
    }
  }

  final Builder responseBuilder = BulkDeleteResponse.newBuilder();
  responseBuilder.setRowsDeleted(totalRowsDeleted);
  if (deleteType == BulkDeleteType.VERSION) {
    responseBuilder.setVersionsDeleted(totalVersionsDeleted);
  }
  // Send the response back
  final BulkDeleteResponse response = responseBuilder.build();
  done.run(response);
}
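The adapterIds field packs internal adapter IDs as consecutive shorts in one byte array, which the endpoint unpacks into column-family names for the scan. A round-trip sketch of that encoding, assuming the ByteArrayUtils and StringUtils helpers used above live in GeoWave's core index package; the class name is illustrative only.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import org.locationtech.geowave.core.index.ByteArrayUtils;
import org.locationtech.geowave.core.index.StringUtils;

public class AdapterIdPackingSketch {
  // pack internal adapter IDs the way a BulkDeleteRequest carries them
  static byte[] pack(final short[] adapterIds) {
    final ByteBuffer buf = ByteBuffer.allocate(2 * adapterIds.length);
    for (final short id : adapterIds) {
      buf.putShort(id);
    }
    return buf.array();
  }

  // unpack into column-family byte arrays exactly as the delete endpoint does
  static List<byte[]> unpack(final byte[] packed) {
    final List<byte[]> families = new ArrayList<>();
    final ByteBuffer buf = ByteBuffer.wrap(packed);
    while (buf.hasRemaining()) {
      families.add(StringUtils.stringToBinary(ByteArrayUtils.shortToString(buf.getShort())));
    }
    return families;
  }
}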