Use of org.apache.phoenix.execute.MutationState.MultiRowMutationState in project phoenix by apache.
From the class PartialCommitIT, the method getConnectionWithTableOrderPreservingMutationState:
private PhoenixConnection getConnectionWithTableOrderPreservingMutationState() throws SQLException {
    Connection con = driver.connect(url, new Properties());
    PhoenixConnection phxCon = new PhoenixConnection(con.unwrap(PhoenixConnection.class));
    final Map<TableRef, MultiRowMutationState> mutations = Maps.newTreeMap(new TableRefComparator());
    // Passing a null mutation state forces connection.newMutationState() to be used to create the MutationState
    return new PhoenixConnection(phxCon, null) {
        @Override
        protected MutationState newMutationState(int maxSize, int maxSizeBytes) {
            return new MutationState(maxSize, maxSizeBytes, this, mutations, false, null);
        }
    };
}
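A minimal usage sketch of what the TreeMap-backed mutation map buys the test (the table names below are assumptions for illustration, not taken from PartialCommitIT): because buffered mutations are keyed by TableRef through a comparator-ordered map, commit() flushes tables in a deterministic order, so a test can predict which tables were already committed when a failure is injected partway through.

try (PhoenixConnection conn = getConnectionWithTableOrderPreservingMutationState()) {
    conn.createStatement().execute("UPSERT INTO a_success_table VALUES ('k1', 'v1')");
    conn.createStatement().execute("UPSERT INTO b_failure_table VALUES ('k2', 'v2')");
    // The TreeMap orders the buffered tables, so this commit sends them in
    // comparator order rather than in HashMap iteration order.
    conn.commit();
}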
Use of org.apache.phoenix.execute.MutationState.MultiRowMutationState in project phoenix by apache.
From the class DeleteCompiler, the method deleteRows:
/**
 * Handles client-side deletion of rows for a DELETE statement. We determine the "best" plan to drive the query
 * using our standard optimizer. The plan may be based on using an index, in which case we need to translate the
 * index row key to get the data row key used to form the delete mutation. We always collect the data table
 * mutations, but we only collect and send the index mutations for global, immutable indexes. Local indexes and
 * mutable indexes are always maintained on the server side.
 * @param context StatementContext for the scan being executed
 * @param iterator ResultIterator for the scan being executed
 * @param bestPlan QueryPlan used to produce the iterator
 * @param projectedTableRef TableRef containing all indexed and covered columns across all indexes on the data table
 * @param otherTableRefs other TableRefs that need to be maintained apart from the one over which the scan is
 *        executing. These might be other index tables (if we're driving off of the data table), the data table
 *        (if we're driving off of an index table), or a mix of the data table and additional index tables.
 * @return MutationState representing the uncommitted data across the data table and indexes. Will be joined with
 *         the MutationState on the connection over which the delete is occurring.
 * @throws SQLException
 */
private static MutationState deleteRows(StatementContext context, ResultIterator iterator, QueryPlan bestPlan,
        TableRef projectedTableRef, List<TableRef> otherTableRefs) throws SQLException {
    RowProjector projector = bestPlan.getProjector();
    TableRef tableRef = bestPlan.getTableRef();
    PTable table = tableRef.getTable();
    PhoenixStatement statement = context.getStatement();
    PhoenixConnection connection = statement.getConnection();
    PName tenantId = connection.getTenantId();
    byte[] tenantIdBytes = null;
    if (tenantId != null) {
        tenantIdBytes = ScanUtil.getTenantIdBytes(table.getRowKeySchema(), table.getBucketNum() != null,
                tenantId, table.getViewIndexId() != null);
    }
    final boolean isAutoCommit = connection.getAutoCommit();
    ConnectionQueryServices services = connection.getQueryServices();
    final int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,
            QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    final int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB,
            QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    final int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
    MultiRowMutationState mutations = new MultiRowMutationState(batchSize);
    List<MultiRowMutationState> otherMutations = null;
    // If otherTableRefs is not empty, we also collect mutations for the other tables that must be
    // maintained client side (we can always get the data table row key from an index row key).
    if (!otherTableRefs.isEmpty()) {
        otherMutations = Lists.newArrayListWithExpectedSize(otherTableRefs.size());
        for (int i = 0; i < otherTableRefs.size(); i++) {
            otherMutations.add(new MultiRowMutationState(batchSize));
        }
    }
    List<PColumn> pkColumns = table.getPKColumns();
    boolean isMultiTenant = table.isMultiTenant() && tenantIdBytes != null;
    boolean isSharedViewIndex = table.getViewIndexId() != null;
    int offset = (table.getBucketNum() == null ? 0 : 1);
    byte[][] values = new byte[pkColumns.size()][];
    if (isSharedViewIndex) {
        values[offset++] = MetaDataUtil.getViewIndexIdDataType().toBytes(table.getViewIndexId());
    }
    if (isMultiTenant) {
        values[offset++] = tenantIdBytes;
    }
    try (final PhoenixResultSet rs = new PhoenixResultSet(iterator, projector, context)) {
        ValueGetter getter = null;
        if (!otherTableRefs.isEmpty()) {
            getter = new ValueGetter() {
                final ImmutableBytesWritable valuePtr = new ImmutableBytesWritable();
                final ImmutableBytesWritable rowKeyPtr = new ImmutableBytesWritable();

                @Override
                public ImmutableBytesWritable getLatestValue(ColumnReference ref, long ts) throws IOException {
                    Cell cell = rs.getCurrentRow().getValue(ref.getFamily(), ref.getQualifier());
                    if (cell == null) {
                        return null;
                    }
                    valuePtr.set(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
                    return valuePtr;
                }

                @Override
                public byte[] getRowKey() {
                    rs.getCurrentRow().getKey(rowKeyPtr);
                    return ByteUtil.copyKeyBytesIfNecessary(rowKeyPtr);
                }
            };
        }
        IndexMaintainer scannedIndexMaintainer = null;
        IndexMaintainer[] maintainers = null;
        PTable dataTable = table;
        if (table.getType() == PTableType.INDEX) {
            if (!otherTableRefs.isEmpty()) {
                // The data table is always the last one in the list if it's
                // not chosen as the best of the possible plans.
                dataTable = otherTableRefs.get(otherTableRefs.size() - 1).getTable();
                scannedIndexMaintainer = IndexMaintainer.create(dataTable, table, connection);
            }
            maintainers = new IndexMaintainer[otherTableRefs.size()];
            for (int i = 0; i < otherTableRefs.size(); i++) {
                // Create IndexMaintainer based on projected table (i.e. SELECT expressions) so that
                // client-side expressions are used instead of server-side ones.
                PTable otherTable = otherTableRefs.get(i).getTable();
                if (otherTable.getType() == PTableType.INDEX) {
                    // In this case, we'll convert from index row -> data row -> other index row
                    maintainers[i] = IndexMaintainer.create(dataTable, otherTable, connection);
                } else {
                    maintainers[i] = scannedIndexMaintainer;
                }
            }
        } else if (!otherTableRefs.isEmpty()) {
            dataTable = table;
            maintainers = new IndexMaintainer[otherTableRefs.size()];
            for (int i = 0; i < otherTableRefs.size(); i++) {
                // Create IndexMaintainer based on projected table (i.e. SELECT expressions) so that
                // client-side expressions are used instead of server-side ones.
                maintainers[i] = IndexMaintainer.create(projectedTableRef.getTable(),
                        otherTableRefs.get(i).getTable(), connection);
            }
        }
        byte[][] viewConstants = IndexUtil.getViewConstants(dataTable);
        int rowCount = 0;
        while (rs.next()) {
            // Allocate new as this is a key in a Map
            ImmutableBytesPtr rowKeyPtr = new ImmutableBytesPtr();
            rs.getCurrentRow().getKey(rowKeyPtr);
            // The check for otherTableRefs being empty is required when deleting directly from the index
            if (otherTableRefs.isEmpty() || isMaintainedOnClient(table)) {
                mutations.put(rowKeyPtr, new RowMutationState(PRow.DELETE_MARKER, 0,
                        statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
            }
            for (int i = 0; i < otherTableRefs.size(); i++) {
                PTable otherTable = otherTableRefs.get(i).getTable();
                // Allocate new as this is a key in a Map
                ImmutableBytesPtr otherRowKeyPtr = new ImmutableBytesPtr();
                // Translate the data table row to the index table row
                if (table.getType() == PTableType.INDEX) {
                    otherRowKeyPtr.set(scannedIndexMaintainer.buildDataRowKey(rowKeyPtr, viewConstants));
                    if (otherTable.getType() == PTableType.INDEX) {
                        otherRowKeyPtr.set(maintainers[i].buildRowKey(getter, otherRowKeyPtr, null, null,
                                HConstants.LATEST_TIMESTAMP));
                    }
                } else {
                    otherRowKeyPtr.set(maintainers[i].buildRowKey(getter, rowKeyPtr, null, null,
                            HConstants.LATEST_TIMESTAMP));
                }
                otherMutations.get(i).put(otherRowKeyPtr, new RowMutationState(PRow.DELETE_MARKER, 0,
                        statement.getConnection().getStatementExecutionCounter(), NULL_ROWTIMESTAMP_INFO, null));
            }
            if (mutations.size() > maxSize) {
                throw new IllegalArgumentException("MutationState size of " + mutations.size()
                        + " is bigger than max allowed size of " + maxSize);
            }
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (isAutoCommit && rowCount % batchSize == 0) {
                MutationState state = new MutationState(tableRef, mutations, 0, maxSize, maxSizeBytes, connection);
                connection.getMutationState().join(state);
                for (int i = 0; i < otherTableRefs.size(); i++) {
                    MutationState indexState = new MutationState(otherTableRefs.get(i), otherMutations.get(i), 0,
                            maxSize, maxSizeBytes, connection);
                    connection.getMutationState().join(indexState);
                }
                connection.getMutationState().send();
                mutations.clear();
                if (otherMutations != null) {
                    for (MultiRowMutationState multiRowMutationState : otherMutations) {
                        multiRowMutationState.clear();
                    }
                }
            }
        }
        // If auto commit is true, this last batch will be committed upon return
        int nCommittedRows = isAutoCommit ? (rowCount / batchSize * batchSize) : 0;
        MutationState state = new MutationState(tableRef, mutations, nCommittedRows, maxSize, maxSizeBytes, connection);
        for (int i = 0; i < otherTableRefs.size(); i++) {
            MutationState indexState = new MutationState(otherTableRefs.get(i), otherMutations.get(i), 0,
                    maxSize, maxSizeBytes, connection);
            state.join(indexState);
        }
        return state;
    }
}
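For context, a hedged sketch of the kind of statement that exercises this path (the connection URL, property value, and table/column names are assumptions for illustration): a multi-row DELETE executed with auto-commit enabled, so the loop above joins and sends a batch every batchSize rows.

Properties props = new Properties();
props.setProperty("phoenix.mutate.batchSize", "1000"); // feeds connection.getMutateBatchSize()
try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
        Statement stmt = conn.createStatement()) {
    conn.setAutoCommit(true); // enables the per-batch join/send in deleteRows()
    // A non-point DELETE is compiled by DeleteCompiler and driven client side by the best plan
    stmt.executeUpdate("DELETE FROM my_table WHERE created_date < CURRENT_DATE() - 30");
}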
Use of org.apache.phoenix.execute.MutationState.MultiRowMutationState in project phoenix by apache.
From the class UpsertCompiler, the method setValues:
private static void setValues(byte[][] values, int[] pkSlotIndex, int[] columnIndexes, PTable table,
        MultiRowMutationState mutation, PhoenixStatement statement, boolean useServerTimestamp,
        IndexMaintainer maintainer, byte[][] viewConstants, byte[] onDupKeyBytes, int numSplColumns)
        throws SQLException {
    long columnValueSize = 0;
    Map<PColumn, byte[]> columnValues = Maps.newHashMapWithExpectedSize(columnIndexes.length);
    byte[][] pkValues = new byte[table.getPKColumns().size()][];
    // For a salted table, the first byte is the salt byte; we set a placeholder
    // here and we will fill in the byte later in PRowImpl.
    if (table.getBucketNum() != null) {
        pkValues[0] = new byte[] { 0 };
    }
    for (int i = 0; i < numSplColumns; i++) {
        pkValues[i + (table.getBucketNum() != null ? 1 : 0)] = values[i];
    }
    // rowTimestamp stays null for the case when the table doesn't have a row timestamp column
    Long rowTimestamp = null;
    RowTimestampColInfo rowTsColInfo = new RowTimestampColInfo(useServerTimestamp, rowTimestamp);
    for (int i = 0, j = numSplColumns; j < values.length; j++, i++) {
        byte[] value = values[j];
        PColumn column = table.getColumns().get(columnIndexes[i]);
        if (SchemaUtil.isPKColumn(column)) {
            pkValues[pkSlotIndex[i]] = value;
            if (SchemaUtil.getPKPosition(table, column) == table.getRowTimestampColPos()) {
                if (!useServerTimestamp) {
                    PColumn rowTimestampCol = table.getPKColumns().get(table.getRowTimestampColPos());
                    rowTimestamp = PLong.INSTANCE.getCodec().decodeLong(value, 0, rowTimestampCol.getSortOrder());
                    if (rowTimestamp < 0) {
                        throw new IllegalDataException(
                                "Value of a column designated as ROW_TIMESTAMP cannot be less than zero");
                    }
                    rowTsColInfo = new RowTimestampColInfo(useServerTimestamp, rowTimestamp);
                }
            }
        } else {
            columnValues.put(column, value);
            columnValueSize += (column.getEstimatedSize() + value.length);
        }
    }
    ImmutableBytesPtr ptr = new ImmutableBytesPtr();
    table.newKey(ptr, pkValues);
    if (table.getIndexType() == IndexType.LOCAL && maintainer != null) {
        byte[] rowKey = maintainer.buildDataRowKey(ptr, viewConstants);
        HRegionLocation region = statement.getConnection().getQueryServices()
                .getTableRegionLocation(table.getParentName().getBytes(), rowKey);
        byte[] regionPrefix = region.getRegionInfo().getStartKey().length == 0
                ? new byte[region.getRegionInfo().getEndKey().length]
                : region.getRegionInfo().getStartKey();
        if (regionPrefix.length != 0) {
            ptr.set(ScanRanges.prefixKey(ptr.get(), 0, ptr.getLength(), regionPrefix, regionPrefix.length));
        }
    }
    mutation.put(ptr, new RowMutationState(columnValues, columnValueSize,
            statement.getConnection().getStatementExecutionCounter(), rowTsColInfo, onDupKeyBytes));
}
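An aside on the new ImmutableBytesPtr() allocations above and in deleteRows(): the pointer becomes a key in the MultiRowMutationState map, so a fresh instance is needed per row. A hedged illustration of what would go wrong otherwise, with a plain HashMap standing in for the mutation map's internals:

Map<ImmutableBytesPtr, String> rows = new HashMap<>();
ImmutableBytesPtr reused = new ImmutableBytesPtr();
reused.set(Bytes.toBytes("row1"));
rows.put(reused, "first");
reused.set(Bytes.toBytes("row2")); // mutates the key object already stored in the map
// The map still holds one entry, but its key now reads "row2"; a lookup of "row1"
// misses even though that was the key originally inserted.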
Use of org.apache.phoenix.execute.MutationState.MultiRowMutationState in project phoenix by apache.
From the class UpsertCompiler, the method upsertSelect:
public static MutationState upsertSelect(StatementContext childContext, TableRef tableRef, RowProjector projector,
        ResultIterator iterator, int[] columnIndexes, int[] pkSlotIndexes, boolean useServerTimestamp,
        boolean prefixSysColValues) throws SQLException {
    PhoenixStatement statement = childContext.getStatement();
    PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,
            QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    int maxSizeBytes = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB,
            QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
    boolean isAutoCommit = connection.getAutoCommit();
    int numSplColumns = (tableRef.getTable().isMultiTenant() ? 1 : 0)
            + (tableRef.getTable().getViewIndexId() != null ? 1 : 0);
    byte[][] values = new byte[columnIndexes.length + numSplColumns][];
    if (prefixSysColValues) {
        int i = 0;
        if (tableRef.getTable().isMultiTenant()) {
            values[i++] = connection.getTenantId().getBytes();
        }
        if (tableRef.getTable().getViewIndexId() != null) {
            values[i++] = PSmallint.INSTANCE.toBytes(tableRef.getTable().getViewIndexId());
        }
    }
    int rowCount = 0;
    MultiRowMutationState mutation = new MultiRowMutationState(batchSize);
    PTable table = tableRef.getTable();
    IndexMaintainer indexMaintainer = null;
    byte[][] viewConstants = null;
    if (table.getIndexType() == IndexType.LOCAL) {
        PTable parentTable = statement.getConnection().getMetaDataCache()
                .getTableRef(new PTableKey(statement.getConnection().getTenantId(),
                        table.getParentName().getString()))
                .getTable();
        indexMaintainer = table.getIndexMaintainer(parentTable, connection);
        viewConstants = IndexUtil.getViewConstants(parentTable);
    }
    try (ResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) {
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        while (rs.next()) {
            for (int i = 0, j = numSplColumns; j < values.length; j++, i++) {
                PColumn column = table.getColumns().get(columnIndexes[i]);
                byte[] bytes = rs.getBytes(i + 1);
                ptr.set(bytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : bytes);
                Object value = rs.getObject(i + 1);
                int rsPrecision = rs.getMetaData().getPrecision(i + 1);
                Integer precision = rsPrecision == 0 ? null : rsPrecision;
                int rsScale = rs.getMetaData().getScale(i + 1);
                Integer scale = rsScale == 0 ? null : rsScale;
                // We are guaranteed that the two columns will have compatible types,
                // as we checked that before.
                if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(),
                        SortOrder.getDefault(), precision, scale, column.getMaxLength(), column.getScale())) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY)
                            .setColumnName(column.getName().getString())
                            .setMessage("value=" + column.getDataType().toStringLiteral(ptr, null))
                            .build().buildException();
                }
                column.getDataType().coerceBytes(ptr, value, column.getDataType(), precision, scale,
                        SortOrder.getDefault(), column.getMaxLength(), column.getScale(),
                        column.getSortOrder(), table.rowKeyOrderOptimizable());
                values[j] = ByteUtil.copyKeyBytesIfNecessary(ptr);
            }
            setValues(values, pkSlotIndexes, columnIndexes, table, mutation, statement, useServerTimestamp,
                    indexMaintainer, viewConstants, null, numSplColumns);
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (isAutoCommit && rowCount % batchSize == 0) {
                MutationState state = new MutationState(tableRef, mutation, 0, maxSize, maxSizeBytes, connection);
                connection.getMutationState().join(state);
                connection.getMutationState().send();
                mutation.clear();
            }
        }
        // If auto commit is true, this last batch will be committed upon return
        return new MutationState(tableRef, mutation, rowCount / batchSize * batchSize, maxSize, maxSizeBytes,
                connection);
    }
}
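Again for context, a hedged sketch of the statement shape that can reach upsertSelect() (the connection URL and table names are assumptions for illustration): an UPSERT ... SELECT between different tables runs client side, pulling rows through a PhoenixResultSet, coercing each value, and buffering rows in a MultiRowMutationState; with auto-commit on, a batch is committed every batchSize rows as in the loop above.

try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
        Statement stmt = conn.createStatement()) {
    conn.setAutoCommit(true); // commit a batch every phoenix.mutate.batchSize rows
    stmt.executeUpdate("UPSERT INTO target_table (id, name) SELECT id, name FROM source_table");
}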