Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
The class AppendOnlySchemaIT, method testTableWithSameSchema.
private void testTableWithSameSchema(boolean notExists, boolean sameClient) throws Exception {
    // use a spied ConnectionQueryServices so we can verify calls to getTable
    ConnectionQueryServices connectionQueryServices = Mockito.spy(driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)));
    Properties props = new Properties();
    props.putAll(PhoenixEmbeddedDriver.DEFFAULT_PROPS.asMap());
    try (Connection conn1 = connectionQueryServices.connect(getUrl(), props);
            Connection conn2 = sameClient ? conn1 : connectionQueryServices.connect(getUrl(), props)) {
        String metricTableName = generateUniqueName();
        String viewName = generateUniqueName();
        String metricIdSeqTableName = generateUniqueName();
        // create sequence for auto partition
        conn1.createStatement().execute("CREATE SEQUENCE " + metricIdSeqTableName + " CACHE 1");
        // create base table
        conn1.createStatement().execute("CREATE TABLE " + metricTableName + "(metricId INTEGER NOT NULL, metricVal DOUBLE, CONSTRAINT PK PRIMARY KEY(metricId))" + " APPEND_ONLY_SCHEMA = true, UPDATE_CACHE_FREQUENCY=1, AUTO_PARTITION_SEQ=" + metricIdSeqTableName);
        // create view
        String ddl = "CREATE VIEW " + (notExists ? "IF NOT EXISTS " : "") + viewName + " ( hostName varchar NOT NULL, tagName varChar" + " CONSTRAINT HOSTNAME_PK PRIMARY KEY (hostName))" + " AS SELECT * FROM " + metricTableName + " UPDATE_CACHE_FREQUENCY=300000";
        conn1.createStatement().execute(ddl);
        conn1.createStatement().execute("UPSERT INTO " + viewName + "(hostName, metricVal) VALUES('host1', 1.0)");
        conn1.commit();
        reset(connectionQueryServices);
        // execute the same create ddl
        try {
            conn2.createStatement().execute(ddl);
            if (!notExists) {
                fail("Create Table should fail");
            }
        } catch (TableAlreadyExistsException e) {
            if (notExists) {
                fail("Create Table should not fail");
            }
        }
        // verify getTable rpcs
        verify(connectionQueryServices, sameClient ? never() : times(1)).getTable((PName) isNull(), eq(new byte[0]), eq(Bytes.toBytes(viewName)), anyLong(), anyLong());
        // verify no create table rpcs
        verify(connectionQueryServices, never()).createTable(anyListOf(Mutation.class), any(byte[].class), any(PTableType.class), anyMap(), anyList(), any(byte[][].class), eq(false), eq(false));
        reset(connectionQueryServices);
        // execute alter table ddl that adds the same column
        ddl = "ALTER VIEW " + viewName + " ADD " + (notExists ? "IF NOT EXISTS" : "") + " tagName varchar";
        try {
            conn2.createStatement().execute(ddl);
            if (!notExists) {
                fail("Alter Table should fail");
            }
        } catch (ColumnAlreadyExistsException e) {
            if (notExists) {
                fail("Alter Table should not fail");
            }
        }
        // if notExists is true, verify one call to addColumn with an empty mutation list (which does not make an RPC);
        // otherwise verify no addColumn calls
        verify(connectionQueryServices, notExists ? times(1) : never()).addColumn(eq(Collections.<Mutation>emptyList()), any(PTable.class), anyMap(), anySetOf(String.class), anyListOf(PColumn.class));
        // upsert one row
        conn2.createStatement().execute("UPSERT INTO " + viewName + "(hostName, metricVal) VALUES('host2', 2.0)");
        conn2.commit();
        // verify data in base table
        ResultSet rs = conn2.createStatement().executeQuery("SELECT * from " + metricTableName);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
        assertEquals(1.0, rs.getDouble(2), 1e-6);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
        assertEquals(2.0, rs.getDouble(2), 1e-6);
        assertFalse(rs.next());
        // verify data in view
        rs = conn2.createStatement().executeQuery("SELECT * from " + viewName);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
        assertEquals(1.0, rs.getDouble(2), 1e-6);
        assertEquals("host1", rs.getString(3));
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
        assertEquals(2.0, rs.getDouble(2), 1e-6);
        assertEquals("host2", rs.getString(3));
        assertFalse(rs.next());
    }
}
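The verifications above hinge on Mockito's spy/reset/verify cycle around ConnectionQueryServices: spy a real object so calls pass through while being recorded, clear the recorded invocations before the phase under test, then assert exact call counts. A minimal, self-contained sketch of that pattern, using a hypothetical Repository class (not part of Phoenix) in place of the spied service:

import static org.mockito.Mockito.never;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

public class SpyVerifySketch {
    // Hypothetical collaborator standing in for ConnectionQueryServices.
    static class Repository {
        String fetch(String key) {
            return key;
        }
    }

    public static void main(String[] args) {
        Repository spied = spy(new Repository());
        spied.fetch("warmup");
        // Discard the invocations recorded so far, as the test does between phases.
        reset(spied);
        spied.fetch("row1");
        // Exactly one fetch since reset, and never with any other key.
        verify(spied, times(1)).fetch("row1");
        verify(spied, never()).fetch("row2");
    }
}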
Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
The class PhoenixIndexBuilder, method convertIncrementToPutInSingletonList.
private static List<Mutation> convertIncrementToPutInSingletonList(Increment inc) {
    byte[] rowKey = inc.getRow();
    Put put = new Put(rowKey);
    transferCells(inc, put);
    transferAttributes(inc, put);
    return Collections.<Mutation>singletonList(put);
}
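transferCells and transferAttributes are private helpers of PhoenixIndexBuilder that this page does not show. A plausible sketch of them, assuming only the standard HBase Mutation API (getFamilyCellMap, getAttributesMap, setAttribute); the real helpers may differ:

import java.util.Map;
import org.apache.hadoop.hbase.client.Mutation;

final class MutationTransferSketch {

    // Copy the source's family-to-cell map wholesale; both mutations target
    // the same row key, so the cells remain valid in the target.
    static void transferCells(Mutation source, Mutation target) {
        target.getFamilyCellMap().putAll(source.getFamilyCellMap());
    }

    // Copy every operation attribute (e.g. the atomic-op marker) across.
    static void transferAttributes(Mutation source, Mutation target) {
        for (Map.Entry<String, byte[]> entry : source.getAttributesMap().entrySet()) {
            target.setAttribute(entry.getKey(), entry.getValue());
        }
    }
}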
Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
The class PhoenixIndexBuilder, method executeAtomicOp.
@Override
public List<Mutation> executeAtomicOp(Increment inc) throws IOException {
    byte[] opBytes = inc.getAttribute(ATOMIC_OP_ATTRIB);
    if (opBytes == null) {
        // Unexpected
        return null;
    }
    inc.setAttribute(ATOMIC_OP_ATTRIB, null);
    Put put = null;
    Delete delete = null;
    // We can neither use the time stamp in the Increment to set the Get time range
    // nor set the Put/Delete time stamp and have this be atomic, as HBase does not
    // handle that. Though we disallow using the ON DUPLICATE KEY clause when the
    // CURRENT_SCN is set, we still may have a time stamp set as of when the table
    // was resolved on the client side. We need to ignore this as well due to limitations
    // in HBase, but this isn't too bad as the time will be very close to the current
    // time anyway.
    long ts = HConstants.LATEST_TIMESTAMP;
    byte[] rowKey = inc.getRow();
    final Get get = new Get(rowKey);
    if (isDupKeyIgnore(opBytes)) {
        get.setFilter(new FirstKeyOnlyFilter());
        Result result = this.env.getRegion().get(get);
        return result.isEmpty() ? convertIncrementToPutInSingletonList(inc) : Collections.<Mutation>emptyList();
    }
    ByteArrayInputStream stream = new ByteArrayInputStream(opBytes);
    DataInputStream input = new DataInputStream(stream);
    boolean skipFirstOp = input.readBoolean();
    short repeat = input.readShort();
    final int[] estimatedSizeHolder = { 0 };
    List<Pair<PTable, List<Expression>>> operations = Lists.newArrayListWithExpectedSize(3);
    while (true) {
        ExpressionVisitor<Void> visitor = new StatelessTraverseAllExpressionVisitor<Void>() {
            @Override
            public Void visit(KeyValueColumnExpression expression) {
                get.addColumn(expression.getColumnFamily(), expression.getColumnQualifier());
                estimatedSizeHolder[0]++;
                return null;
            }
        };
        try {
            int nExpressions = WritableUtils.readVInt(input);
            List<Expression> expressions = Lists.newArrayListWithExpectedSize(nExpressions);
            for (int i = 0; i < nExpressions; i++) {
                Expression expression = ExpressionType.values()[WritableUtils.readVInt(input)].newInstance();
                expression.readFields(input);
                expressions.add(expression);
                expression.accept(visitor);
            }
            PTableProtos.PTable tableProto = PTableProtos.PTable.parseDelimitedFrom(input);
            PTable table = PTableImpl.createFromProto(tableProto);
            operations.add(new Pair<>(table, expressions));
        } catch (EOFException e) {
            break;
        }
    }
    int estimatedSize = estimatedSizeHolder[0];
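    // If no expression referenced a column, nothing was projected into the Get;
    // limit it to the first cell, since we only need to know whether the row exists.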
    if (get.getFamilyMap().isEmpty()) {
        get.setFilter(new FirstKeyOnlyFilter());
    }
    MultiKeyValueTuple tuple;
    List<Cell> flattenedCells = null;
    List<Cell> cells = ((HRegion) this.env.getRegion()).get(get, false);
    if (cells.isEmpty()) {
        if (skipFirstOp) {
            if (operations.size() <= 1 && repeat <= 1) {
                return convertIncrementToPutInSingletonList(inc);
            }
            // Skip first operation (if first wasn't ON DUPLICATE KEY IGNORE)
            repeat--;
        }
        // Base current state off of new row
        flattenedCells = flattenCells(inc, estimatedSize);
        tuple = new MultiKeyValueTuple(flattenedCells);
    } else {
        // Base current state off of existing row
        tuple = new MultiKeyValueTuple(cells);
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    for (int opIndex = 0; opIndex < operations.size(); opIndex++) {
        Pair<PTable, List<Expression>> operation = operations.get(opIndex);
        PTable table = operation.getFirst();
        List<Expression> expressions = operation.getSecond();
        for (int j = 0; j < repeat; j++) {
            // repeater loop
            ptr.set(rowKey);
            // Sort the list of cells (if they've been flattened, in which case they're not
            // necessarily ordered correctly). We only need the list sorted when the expressions
            // are going to be executed, not when the outer loop is exited. Hence we do it
            // here, at the top of the loop.
            if (flattenedCells != null) {
                Collections.sort(flattenedCells, KeyValue.COMPARATOR);
            }
            PRow row = table.newRow(GenericKeyValueBuilder.INSTANCE, ts, ptr, false);
            for (int i = 0; i < expressions.size(); i++) {
                Expression expression = expressions.get(i);
                ptr.set(ByteUtil.EMPTY_BYTE_ARRAY);
                expression.evaluate(tuple, ptr);
                PColumn column = table.getColumns().get(i + 1);
                Object value = expression.getDataType().toObject(ptr, column.getSortOrder());
                // The expression and the column are guaranteed to have the same type.
                if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(), expression.getSortOrder(), expression.getMaxLength(), expression.getScale(), column.getMaxLength(), column.getScale())) {
                    throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(), column.getScale());
                }
                column.getDataType().coerceBytes(ptr, value, expression.getDataType(), expression.getMaxLength(), expression.getScale(), expression.getSortOrder(), column.getMaxLength(), column.getScale(), column.getSortOrder(), table.rowKeyOrderOptimizable());
                byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
                row.setValue(column, bytes);
            }
            flattenedCells = Lists.newArrayListWithExpectedSize(estimatedSize);
            List<Mutation> mutations = row.toRowMutations();
            for (Mutation source : mutations) {
                flattenCells(source, flattenedCells);
            }
            tuple.setKeyValues(flattenedCells);
        }
        // Repeat only applies to first statement
        repeat = 1;
    }
    List<Mutation> mutations = Lists.newArrayListWithExpectedSize(2);
    for (int i = 0; i < tuple.size(); i++) {
        Cell cell = tuple.getValue(i);
        if (Type.codeToType(cell.getTypeByte()) == Type.Put) {
            if (put == null) {
                put = new Put(rowKey);
                transferAttributes(inc, put);
                mutations.add(put);
            }
            put.add(cell);
        } else {
            if (delete == null) {
                delete = new Delete(rowKey);
                transferAttributes(inc, delete);
                mutations.add(delete);
            }
            delete.addDeleteMarker(cell);
        }
    }
    return mutations;
}
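The while-loop above decodes a compact stream: a boolean skip flag, a short repeat count, then groups of (vint expression count, serialized expressions, a delimited PTable proto) until EOF. For orientation, here is a minimal sketch of a matching writer; it mirrors only what the reader consumes, and the actual Phoenix encoder may differ in details such as method names and location. The resulting bytes would travel to the server as the Increment's ATOMIC_OP_ATTRIB attribute.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.io.WritableUtils;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.ExpressionType;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTableImpl;

final class OnDupKeyEncoderSketch {
    // Sketch: produce bytes in the exact order executeAtomicOp reads them.
    static byte[] serialize(boolean skipFirstOp, short repeat,
            List<Pair<PTable, List<Expression>>> operations) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream output = new DataOutputStream(bytes);
        output.writeBoolean(skipFirstOp); // read by input.readBoolean()
        output.writeShort(repeat); // read by input.readShort()
        for (Pair<PTable, List<Expression>> op : operations) {
            List<Expression> expressions = op.getSecond();
            WritableUtils.writeVInt(output, expressions.size());
            for (Expression expression : expressions) {
                // The reader instantiates via ExpressionType.values()[vint] and readFields()
                WritableUtils.writeVInt(output, ExpressionType.valueOf(expression).ordinal());
                expression.write(output);
            }
            // The reader calls PTableProtos.PTable.parseDelimitedFrom(input)
            PTableImpl.toProto(op.getFirst()).writeDelimitedTo(output);
        }
        output.flush();
        return bytes.toByteArray();
    }
}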
Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
The class PhoenixTransactionalIndexer, method getIndexUpdates.
private Collection<Pair<Mutation, byte[]>> getIndexUpdates(RegionCoprocessorEnvironment env, PhoenixIndexMetaData indexMetaData, Iterator<Mutation> mutationIterator, byte[] txRollbackAttribute) throws IOException {
    Transaction tx = indexMetaData.getTransaction();
    if (tx == null) {
        throw new NullPointerException("Expected to find transaction in metadata for " + env.getRegionInfo().getTable().getNameAsString());
    }
    boolean isRollback = txRollbackAttribute != null;
    boolean isImmutable = indexMetaData.isImmutableRows();
    ResultScanner currentScanner = null;
    TransactionAwareHTable txTable = null;
    // Collect up all mutations in batch
    Map<ImmutableBytesPtr, MultiMutation> mutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    Map<ImmutableBytesPtr, MultiMutation> findPriorValueMutations;
    if (isImmutable && !isRollback) {
        findPriorValueMutations = new HashMap<ImmutableBytesPtr, MultiMutation>();
    } else {
        findPriorValueMutations = mutations;
    }
    while (mutationIterator.hasNext()) {
        Mutation m = mutationIterator.next();
        // add the mutation to the batch set
        ImmutableBytesPtr row = new ImmutableBytesPtr(m.getRow());
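        // When rows are immutable (and this isn't a rollback), prior state is only
        // needed for deletes, which are tracked in the separate findPriorValueMutations map.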
        if (mutations != findPriorValueMutations && isDeleteMutation(m)) {
            addMutation(findPriorValueMutations, row, m);
        }
        addMutation(mutations, row, m);
    }
    // Collect the set of mutable ColumnReferences so that we can first
    // run a scan to get the current state. We'll need this to delete
    // the existing index rows.
    List<IndexMaintainer> indexMaintainers = indexMetaData.getIndexMaintainers();
    int estimatedSize = indexMaintainers.size() * 10;
    Set<ColumnReference> mutableColumns = Sets.newHashSetWithExpectedSize(estimatedSize);
    for (IndexMaintainer indexMaintainer : indexMaintainers) {
        // For transactional tables, we use an index maintainer
        // to aid in rollback if there's a KeyValue column in the index. The alternative would be
        // to hold on to all uncommitted index row keys (even ones already sent to HBase) on the
        // client side.
        Set<ColumnReference> allColumns = indexMaintainer.getAllColumns();
        mutableColumns.addAll(allColumns);
    }
    Collection<Pair<Mutation, byte[]>> indexUpdates = new ArrayList<Pair<Mutation, byte[]>>(mutations.size() * 2 * indexMaintainers.size());
    try {
        // Only scan for prior row state when some mutation requires it; for rollback,
        // findPriorValueMutations aliases the full mutation map, so this logic will work there too.
        if (!findPriorValueMutations.isEmpty()) {
            List<KeyRange> keys = Lists.newArrayListWithExpectedSize(mutations.size());
            for (ImmutableBytesPtr ptr : findPriorValueMutations.keySet()) {
                keys.add(PVarbinary.INSTANCE.getKeyRange(ptr.copyBytesIfNecessary()));
            }
            Scan scan = new Scan();
            // Project all mutable columns
            for (ColumnReference ref : mutableColumns) {
                scan.addColumn(ref.getFamily(), ref.getQualifier());
            }
            /*
             * Indexes inherit the storage scheme of the data table which means all the indexes have the same
             * storage scheme and empty key value qualifier. Note that this assumption would be broken if we start
             * supporting new indexes over existing data tables to have a different storage scheme than the data
             * table.
             */
            byte[] emptyKeyValueQualifier = indexMaintainers.get(0).getEmptyKeyValueQualifier();
            // Project empty key value column
            scan.addColumn(indexMaintainers.get(0).getDataEmptyKeyValueCF(), emptyKeyValueQualifier);
            ScanRanges scanRanges = ScanRanges.create(SchemaUtil.VAR_BINARY_SCHEMA, Collections.singletonList(keys), ScanUtil.SINGLE_COLUMN_SLOT_SPAN, KeyRange.EVERYTHING_RANGE, null, true, -1);
            scanRanges.initializeScan(scan);
            TableName tableName = env.getRegion().getRegionInfo().getTable();
            HTableInterface htable = env.getTable(tableName);
            txTable = new TransactionAwareHTable(htable);
            txTable.startTx(tx);
            // For rollback, we need to see all versions, including
            // the last committed version as there may be multiple
            // checkpointed versions.
            SkipScanFilter filter = scanRanges.getSkipScanFilter();
            if (isRollback) {
                filter = new SkipScanFilter(filter, true);
                tx.setVisibility(VisibilityLevel.SNAPSHOT_ALL);
            }
            scan.setFilter(filter);
            currentScanner = txTable.getScanner(scan);
        }
        if (isRollback) {
            processRollback(env, indexMetaData, txRollbackAttribute, currentScanner, tx, mutableColumns, indexUpdates, mutations);
        } else {
            processMutation(env, indexMetaData, txRollbackAttribute, currentScanner, tx, mutableColumns, indexUpdates, mutations, findPriorValueMutations);
        }
    } finally {
        if (txTable != null)
            txTable.close();
    }
    return indexUpdates;
}
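addMutation is not shown on this page; it collapses every mutation for a given row into one MultiMutation so that index maintenance sees a single combined update per row key. A sketch of its likely shape, mirroring the pattern used by Phoenix's non-transactional Indexer (treat the exact signature as an assumption):

import java.util.Map;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.phoenix.hbase.index.MultiMutation;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;

final class AddMutationSketch {

    // Merge a mutation into the MultiMutation recorded for its row, creating it on first sight.
    static void addMutation(Map<ImmutableBytesPtr, MultiMutation> mutations, ImmutableBytesPtr row, Mutation m) {
        MultiMutation stored = mutations.get(row);
        if (stored == null) {
            // First mutation seen for this row
            stored = new MultiMutation(row);
            mutations.put(row, stored);
        }
        stored.addAll(m);
    }
}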
Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
The class PhoenixIndexFailurePolicy, method getLocalIndexNames.
private Collection<? extends String> getLocalIndexNames(HTableInterfaceReference ref, Collection<Mutation> mutations) throws IOException {
    Set<String> indexTableNames = new HashSet<String>(1);
    PhoenixConnection conn = null;
    try {
        conn = QueryUtil.getConnectionOnServer(this.env.getConfiguration()).unwrap(PhoenixConnection.class);
        PTable dataTable = PhoenixRuntime.getTableNoCache(conn, ref.getTableName());
        List<PTable> indexes = dataTable.getIndexes();
        // local index used to get view id from index mutation row key.
        PTable localIndex = null;
        Map<ImmutableBytesWritable, String> localIndexNames = new HashMap<ImmutableBytesWritable, String>();
        for (PTable index : indexes) {
            if (index.getIndexType() == IndexType.LOCAL && index.getIndexState() == PIndexState.ACTIVE) {
                if (localIndex == null)
                    localIndex = index;
                localIndexNames.put(new ImmutableBytesWritable(MetaDataUtil.getViewIndexIdDataType().toBytes(index.getViewIndexId())), index.getName().getString());
            }
        }
        if (localIndex == null) {
            return Collections.emptySet();
        }
        IndexMaintainer indexMaintainer = localIndex.getIndexMaintainer(dataTable, conn);
        HRegionInfo regionInfo = this.env.getRegion().getRegionInfo();
        int offset = regionInfo.getStartKey().length == 0 ? regionInfo.getEndKey().length : regionInfo.getStartKey().length;
        byte[] viewId = null;
        for (Mutation mutation : mutations) {
            viewId = indexMaintainer.getViewIndexIdFromIndexRowKey(new ImmutableBytesWritable(mutation.getRow(), offset, mutation.getRow().length - offset));
            String indexTableName = localIndexNames.get(new ImmutableBytesWritable(viewId));
            indexTableNames.add(indexTableName);
        }
    } catch (ClassNotFoundException e) {
        throw new IOException(e);
    } catch (SQLException e) {
        throw new IOException(e);
    } finally {
        if (conn != null) {
            try {
                conn.close();
            } catch (SQLException e) {
                throw new IOException(e);
            }
        }
    }
    return indexTableNames;
}