Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.
The class DeleteCompiler, method deleteRows.
private static MutationState deleteRows(StatementContext childContext, TableRef targetTableRef, List<TableRef> indexTableRefs, ResultIterator iterator, RowProjector projector, TableRef sourceTableRef) throws SQLException {
    PTable table = targetTableRef.getTable();
    PhoenixStatement statement = childContext.getStatement();
    PhoenixConnection connection = statement.getConnection();
    PName tenantId = connection.getTenantId();
    byte[] tenantIdBytes = null;
    if (tenantId != null) {
        tenantIdBytes = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null, tenantId, table.getViewIndexId() != null);
    }
    final boolean isAutoCommit = connection.getAutoCommit();
    ConnectionQueryServices services = connection.getQueryServices();
    final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    final int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
    Map<ImmutableBytesPtr, RowMutationState> mutations = Maps.newHashMapWithExpectedSize(batchSize);
    List<Map<ImmutableBytesPtr, RowMutationState>> indexMutations = null;
    // If there are index tables to maintain, we delete rows from both the index tables and
    // the data table through a single query to save executing an additional one.
    if (!indexTableRefs.isEmpty()) {
        indexMutations = Lists.newArrayListWithExpectedSize(indexTableRefs.size());
        for (int i = 0; i < indexTableRefs.size(); i++) {
            indexMutations.add(Maps.<ImmutableBytesPtr, RowMutationState>newHashMapWithExpectedSize(batchSize));
        }
    }
    List<PColumn> pkColumns = table.getPKColumns();
    boolean isMultiTenant = table.isMultiTenant() && tenantIdBytes != null;
    boolean isSharedViewIndex = table.getViewIndexId() != null;
    int offset = (table.getBucketNum() == null ? 0 : 1);
    byte[][] values = new byte[pkColumns.size()][];
    if (isSharedViewIndex) {
        values[offset++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
    }
    if (isMultiTenant) {
        values[offset++] = tenantIdBytes;
    }
    try (PhoenixResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) {
        int rowCount = 0;
        while (rs.next()) {
            // allocate new as this is a key in a Map
            ImmutableBytesPtr ptr = new ImmutableBytesPtr();
            // If the source and target tables are the same, the row key can be used as-is;
            // there's no translation required.
            if (sourceTableRef.equals(targetTableRef)) {
                rs.getCurrentRow().getKey(ptr);
            } else {
                for (int i = offset; i < values.length; i++) {
                    byte[] byteValue = rs.getBytes(i + 1 - offset);
                    // TODO: consider going under the hood and just getting the bytes
                    if (pkColumns.get(i).getSortOrder() == SortOrder.DESC) {
                        byte[] tempByteValue = Arrays.copyOf(byteValue, byteValue.length);
                        byteValue = SortOrder.invert(byteValue, 0, tempByteValue, 0, byteValue.length);
                    }
                    values[i] = byteValue;
                }
                table.newKey(ptr, values);
            }
            // When issuing deletes, we do not care about the row time ranges. Also, if the table has a
            // row timestamp column, the row key will already contain its value.
            mutations.put(ptr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
            for (int i = 0; i < indexTableRefs.size(); i++) {
                // allocate new as this is a key in a Map
                ImmutableBytesPtr indexPtr = new ImmutableBytesPtr();
                rs.getCurrentRow().getKey(indexPtr);
                indexMutations.get(i).put(indexPtr, new RowMutationState(PRow.DELETE_MARKER, statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
            }
            if (mutations.size() > maxSize) {
                throw new IllegalArgumentException("MutationState size of " + mutations.size() + " is bigger than max allowed size of " + maxSize);
            }
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (isAutoCommit && rowCount % batchSize == 0) {
                MutationState state = new MutationState(targetTableRef, mutations, 0, maxSize, maxSizeBytes, connection);
                connection.getMutationState().join(state);
                for (int i = 0; i < indexTableRefs.size(); i++) {
                    MutationState indexState = new MutationState(indexTableRefs.get(i), indexMutations.get(i), 0, maxSize, maxSizeBytes, connection);
                    connection.getMutationState().join(indexState);
                }
                connection.getMutationState().send();
                mutations.clear();
                if (indexMutations != null) {
                    // Clear each per-index map rather than the list itself, so the list
                    // stays aligned with indexTableRefs for subsequent batches.
                    for (Map<ImmutableBytesPtr, RowMutationState> indexMutationMap : indexMutations) {
                        indexMutationMap.clear();
                    }
                }
            }
        }
        // If auto commit is true, this last batch will be committed upon return
        int nCommittedRows = isAutoCommit ? (rowCount / batchSize * batchSize) : 0;
        MutationState state = new MutationState(targetTableRef, mutations, nCommittedRows, maxSize, maxSizeBytes, connection);
        for (int i = 0; i < indexTableRefs.size(); i++) {
            // Use a zero size offset here so these index rows are not counted toward the committed row count.
            MutationState indexState = new MutationState(indexTableRefs.get(i), indexMutations.get(i), 0, maxSize, maxSizeBytes, connection);
            state.join(indexState);
        }
        return state;
    }
}
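
For context, a minimal client-side sketch of the auto-commit path this method serves; the JDBC URL and table name (jdbc:phoenix:localhost:2181, EVENTS) are illustrative assumptions, not taken from the Phoenix source.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class AutoCommitDeleteExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical ZooKeeper quorum; point this at your own cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181")) {
            // With auto-commit enabled, deleteRows flushes mutations to the server
            // every batchSize rows instead of buffering the entire delete client-side.
            conn.setAutoCommit(true);
            try (Statement stmt = conn.createStatement()) {
                // EVENTS and CREATED_DATE are assumed names for illustration.
                int deleted = stmt.executeUpdate("DELETE FROM EVENTS WHERE CREATED_DATE < CURRENT_DATE() - 30");
                System.out.println("Deleted " + deleted + " rows");
            }
        }
    }
}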
Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.
The class DropSequenceCompiler, method compile.
public MutationPlan compile(final DropSequenceStatement sequence) throws SQLException {
    final PhoenixConnection connection = statement.getConnection();
    final MetaDataClient client = new MetaDataClient(connection);
    final StatementContext context = new StatementContext(statement);
    return new BaseMutationPlan(context, operation) {

        @Override
        public MutationState execute() throws SQLException {
            return client.dropSequence(sequence);
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            return new ExplainPlan(Collections.singletonList("DROP SEQUENCE"));
        }
    };
}
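
A hedged sketch of how this plan is reached from JDBC; the connection URL and sequence name are assumptions for illustration.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class DropSequenceExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181");
             Statement stmt = conn.createStatement()) {
            // The statement parses to a DropSequenceStatement; the compiler above wraps
            // MetaDataClient.dropSequence(...) in a BaseMutationPlan and executes it.
            // MY_SCHEMA.MY_SEQUENCE is an assumed name.
            stmt.executeUpdate("DROP SEQUENCE IF EXISTS MY_SCHEMA.MY_SEQUENCE");
        }
    }
}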
Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.
The class TestUtil, method doMajorCompaction.
/**
 * Runs a major compaction, and then waits until the compaction is complete before returning.
 *
 * @param conn connection used to resolve the table
 * @param tableName name of the table to be compacted
 */
public static void doMajorCompaction(Connection conn, String tableName) throws Exception {
    tableName = SchemaUtil.normalizeIdentifier(tableName);
    // We simply write a marker row, request a major compaction, and then wait until the marker
    // row is gone
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), tableName));
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    MutationState mutationState = pconn.getMutationState();
    if (table.isTransactional()) {
        mutationState.startTransaction();
    }
    try (HTableInterface htable = mutationState.getHTable(table)) {
        byte[] markerRowKey = Bytes.toBytes("TO_DELETE");
        Put put = new Put(markerRowKey);
        put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
        htable.put(put);
        Delete delete = new Delete(markerRowKey);
        delete.deleteColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
        htable.delete(delete);
        htable.close();
        if (table.isTransactional()) {
            mutationState.commit();
        }
        HBaseAdmin hbaseAdmin = services.getAdmin();
        hbaseAdmin.flush(tableName);
        hbaseAdmin.majorCompact(tableName);
        hbaseAdmin.close();
        boolean compactionDone = false;
        while (!compactionDone) {
            Thread.sleep(6000L);
            Scan scan = new Scan();
            scan.setStartRow(markerRowKey);
            scan.setStopRow(Bytes.add(markerRowKey, new byte[] { 0 }));
            scan.setRaw(true);
            try (HTableInterface htableForRawScan = services.getTable(Bytes.toBytes(tableName))) {
                ResultScanner scanner = htableForRawScan.getScanner(scan);
                List<Result> results = Lists.newArrayList(scanner);
                LOG.info("Results: " + results);
                compactionDone = results.isEmpty();
                scanner.close();
            }
            LOG.info("Compaction done: " + compactionDone);
            // need to run compaction after the next txn snapshot has been written so that compaction can remove deleted rows
            if (!compactionDone && table.isTransactional()) {
                hbaseAdmin = services.getAdmin();
                hbaseAdmin.flush(tableName);
                hbaseAdmin.majorCompact(tableName);
                hbaseAdmin.close();
            }
        }
    }
}
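
A typical test usage, sketched under the assumption of a table named TEST_TABLE and a local quorum: after a delete is committed, the major compaction physically removes the deleted cells, so raw scans no longer see them.

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.phoenix.util.TestUtil;

public class CompactionExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181")) {
            // TEST_TABLE and ID are assumed names for illustration.
            conn.createStatement().executeUpdate("DELETE FROM TEST_TABLE WHERE ID = 1");
            conn.commit();
            // Blocks until the marker row written by doMajorCompaction disappears,
            // i.e. until the major compaction has actually completed.
            TestUtil.doMajorCompaction(conn, "TEST_TABLE");
        }
    }
}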
Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.
The class TestUtil, method getGuidePostsList.
public static Collection<GuidePostsInfo> getGuidePostsList(Connection conn, String tableName, String pkCol, byte[] lowerRange, byte[] upperRange, String whereClauseSuffix) throws SQLException {
    String whereClauseStart = (lowerRange == null && upperRange == null ? "" : " WHERE " + ((lowerRange != null ? (pkCol + " >= ? " + (upperRange != null ? " AND " : "")) : "") + (upperRange != null ? (pkCol + " < ?") : "")));
    // Keep the range predicate and append the suffix with AND; if there is no range predicate, start the WHERE clause with the suffix.
    String whereClause = whereClauseSuffix == null ? whereClauseStart : whereClauseStart.length() == 0 ? (" WHERE " + whereClauseSuffix) : (whereClauseStart + " AND " + whereClauseSuffix);
    String query = "SELECT /*+ NO_INDEX */ COUNT(*) FROM " + tableName + whereClause;
    PhoenixPreparedStatement pstmt = conn.prepareStatement(query).unwrap(PhoenixPreparedStatement.class);
    if (lowerRange != null) {
        pstmt.setBytes(1, lowerRange);
    }
    if (upperRange != null) {
        pstmt.setBytes(lowerRange != null ? 2 : 1, upperRange);
    }
    pstmt.execute();
    TableRef tableRef = pstmt.getQueryPlan().getTableRef();
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable table = tableRef.getTable();
    GuidePostsInfo info = pconn.getQueryServices().getTableStats(new GuidePostsKey(table.getName().getBytes(), SchemaUtil.getEmptyColumnFamily(table)));
    return Collections.singletonList(info);
}
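
A hedged usage sketch; the table and column names are assumptions for illustration. The count query above forces a query plan whose TableRef yields the GuidePostsKey used to look up the cached statistics.

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Collection;
import org.apache.phoenix.schema.stats.GuidePostsInfo;
import org.apache.phoenix.util.TestUtil;

public class GuidePostsExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181")) {
            // Collect stats first so there are guideposts to read back; TEST_TABLE and ID are assumed names.
            conn.createStatement().execute("UPDATE STATISTICS TEST_TABLE");
            Collection<GuidePostsInfo> guidePosts =
                TestUtil.getGuidePostsList(conn, "TEST_TABLE", "ID", null, null, null);
            System.out.println("Guideposts: " + guidePosts);
        }
    }
}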
Use of org.apache.phoenix.jdbc.PhoenixConnection in project phoenix by apache.
The class TestUtil, method clearMetaDataCache.
public static void clearMetaDataCache(Connection conn) throws Throwable {
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    HTableInterface htable = pconn.getQueryServices().getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
    htable.coprocessorService(MetaDataService.class, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, new Batch.Call<MetaDataService, ClearCacheResponse>() {

        @Override
        public ClearCacheResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<ClearCacheResponse> rpcCallback = new BlockingRpcCallback<ClearCacheResponse>();
            ClearCacheRequest.Builder builder = ClearCacheRequest.newBuilder();
            instance.clearCache(controller, builder.build(), rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    });
}
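
A brief usage note, with an assumed local connection: clearing the server-side cache forces the next statement to re-resolve table metadata from SYSTEM.CATALOG, which is useful in tests after out-of-band schema changes.

import java.sql.Connection;
import java.sql.DriverManager;
import org.apache.phoenix.util.TestUtil;

public class ClearCacheExample {
    public static void main(String[] args) throws Throwable {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181")) {
            // Invokes the MetaData coprocessor's clearCache RPC against SYSTEM.CATALOG.
            TestUtil.clearMetaDataCache(conn);
        }
    }
}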