Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
The class ConnectionQueryServicesImpl, method dropFunction:
@Override
public MetaDataMutationResult dropFunction(final List<Mutation> functionData, final boolean ifExists) throws SQLException {
    // The row key of the first mutation encodes the tenant ID and the function name
    byte[][] rowKeyMetadata = new byte[2][];
    byte[] key = functionData.get(0).getRow();
    SchemaUtil.getVarChars(key, rowKeyMetadata);
    byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
    byte[] functionBytes = rowKeyMetadata[PhoenixDatabaseMetaData.FUNTION_NAME_INDEX];
    byte[] functionKey = SchemaUtil.getFunctionKey(tenantIdBytes, functionBytes);
    final MetaDataMutationResult result = metaDataCoprocessorExec(functionKey, new Batch.Call<MetaDataService, MetaDataResponse>() {

        @Override
        public MetaDataResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<MetaDataResponse> rpcCallback = new BlockingRpcCallback<MetaDataResponse>();
            DropFunctionRequest.Builder builder = DropFunctionRequest.newBuilder();
            // Ship each metadata mutation to the coprocessor as a serialized MutationProto
            for (Mutation m : functionData) {
                MutationProto mp = ProtobufUtil.toProto(m);
                builder.addTableMetadataMutations(mp.toByteString());
            }
            builder.setIfExists(ifExists);
            builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, PHOENIX_MINOR_VERSION, PHOENIX_PATCH_NUMBER));
            instance.dropFunction(controller, builder.build(), rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    }, PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME_BYTES);
    return result;
}
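The request builder above ships each Mutation to the coprocessor as a serialized MutationProto byte string; Phoenix's ProtobufUtil.toProto performs the client-side conversion. As a minimal sketch of that round trip (not Phoenix code; the class name, row, and column values are made up), the following standalone snippet converts a Put with HBase's own ProtobufUtil and parses it back as the receiving side would:

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.util.Bytes;

public class MutationProtoRoundTrip {

    public static void main(String[] args) throws Exception {
        // Hypothetical row and column; any Mutation subtype serializes the same way
        Put put = new Put(Bytes.toBytes("row-1"));
        put.add(Bytes.toBytes("0"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        // Client side: Mutation -> MutationProto, then to bytes for the RPC payload
        MutationProto mp = ProtobufUtil.toMutation(MutationProto.MutationType.PUT, put);
        byte[] wire = mp.toByteString().toByteArray();
        // Server side: parse the proto and rebuild the Put
        MutationProto parsed = MutationProto.parseFrom(wire);
        Put rebuilt = ProtobufUtil.toPut(parsed);
        System.out.println(Bytes.toString(rebuilt.getRow()));
    }
}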
Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
The class IndexUtil, method setIndexDisableTimeStamp:
public static MetaDataMutationResult setIndexDisableTimeStamp(String indexTableName, long minTimeStamp, HTableInterface metaTable, PIndexState newState) throws ServiceException, Throwable {
    byte[] indexTableKey = SchemaUtil.getTableKeyFromFullName(indexTableName);
    // Mimic the Put that gets generated by the client on an update of the index state
    Put put = new Put(indexTableKey);
    put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_STATE_BYTES, newState.getSerializedBytes());
    put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES, PLong.INSTANCE.toBytes(minTimeStamp));
    put.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.ASYNC_REBUILD_TIMESTAMP_BYTES, PLong.INSTANCE.toBytes(0));
    final List<Mutation> tableMetadata = Collections.<Mutation>singletonList(put);
    final Map<byte[], MetaDataResponse> results = metaTable.coprocessorService(MetaDataService.class, indexTableKey, indexTableKey, new Batch.Call<MetaDataService, MetaDataResponse>() {

        @Override
        public MetaDataResponse call(MetaDataService instance) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<MetaDataResponse> rpcCallback = new BlockingRpcCallback<MetaDataResponse>();
            UpdateIndexStateRequest.Builder builder = UpdateIndexStateRequest.newBuilder();
            for (Mutation m : tableMetadata) {
                MutationProto mp = ProtobufUtil.toProto(m);
                builder.addTableMetadataMutations(mp.toByteString());
            }
            instance.updateIndexState(controller, builder.build(), rpcCallback);
            if (controller.getFailedOn() != null) {
                throw controller.getFailedOn();
            }
            return rpcCallback.get();
        }
    });
    if (results.isEmpty()) {
        throw new IOException("Didn't get expected result size");
    }
    MetaDataResponse tmpResponse = results.values().iterator().next();
    return MetaDataMutationResult.constructFromProto(tmpResponse);
}
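A caller needs an HTableInterface for SYSTEM.CATALOG, since that table hosts the MetaDataService endpoint this method invokes. A hypothetical caller sketch follows; the index name, connection handling, and printed output are illustrative, not taken from Phoenix:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.HTableInterface;
import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
import org.apache.phoenix.schema.PIndexState;
import org.apache.phoenix.util.IndexUtil;

public class DisableIndexSketch {

    public static void main(String[] args) throws Throwable {
        HConnection conn = HConnectionManager.createConnection(HBaseConfiguration.create());
        // SYSTEM.CATALOG hosts the MetaDataService coprocessor endpoint
        HTableInterface metaTable = conn.getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
        try {
            // "MY_SCHEMA.MY_INDEX" is a hypothetical full index name
            MetaDataMutationResult result = IndexUtil.setIndexDisableTimeStamp(
                "MY_SCHEMA.MY_INDEX", System.currentTimeMillis(), metaTable, PIndexState.DISABLE);
            System.out.println(result.getMutationCode());
        } finally {
            metaTable.close();
            conn.close();
        }
    }
}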
Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
The class ScanUtil, method newScanRanges:
public static ScanRanges newScanRanges(List<? extends Mutation> mutations) throws SQLException {
    List<KeyRange> keys = Lists.newArrayListWithExpectedSize(mutations.size());
    for (Mutation m : mutations) {
        keys.add(PVarbinary.INSTANCE.getKeyRange(m.getRow()));
    }
    ScanRanges keyRanges = ScanRanges.createPointLookup(keys);
    return keyRanges;
}
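Since every Mutation carries its row key, the list collapses into a set of point lookups. A minimal sketch of feeding this helper, assuming the ScanRanges accessors from the Phoenix 4.x codebase and made-up row keys:

import java.util.List;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.compile.ScanRanges;
import org.apache.phoenix.util.ScanUtil;
import com.google.common.collect.Lists;

public class PointLookupSketch {

    public static void main(String[] args) throws Exception {
        // Two hypothetical row keys being mutated
        List<Mutation> mutations = Lists.<Mutation>newArrayList(
            new Put(Bytes.toBytes("row-a")), new Put(Bytes.toBytes("row-b")));
        ScanRanges ranges = ScanUtil.newScanRanges(mutations);
        // The result is a pure point lookup over exactly those keys
        System.out.println(ranges.isPointLookup() + ", " + ranges.getPointLookupCount());
    }
}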
Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
The class MetaDataUtil, method getParentTableName:
public static byte[] getParentTableName(List<Mutation> tableMetadata) {
    if (tableMetadata.size() == 1) {
        // A single mutation means there is only the table header row, so no parent
        return null;
    }
    byte[][] rowKeyMetaData = new byte[3][];
    getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData);
    byte[] schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
    byte[] tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
    Mutation m = getParentTableHeaderRow(tableMetadata);
    getVarChars(m.getRow(), 3, rowKeyMetaData);
    // If the parent header row resolves to the same schema and table, there is no separate parent
    if (Bytes.compareTo(schemaName, rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX]) == 0 && Bytes.compareTo(tableName, rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]) == 0) {
        return null;
    }
    return rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
}
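The comparison works because SYSTEM.CATALOG row keys are null-byte-separated varchars: tenant ID, then schema name, then table name. A small sketch of building and splitting such a key with SchemaUtil (the tenant, schema, and table names are illustrative):

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.util.SchemaUtil;

public class RowKeyLayoutSketch {

    public static void main(String[] args) {
        // Hypothetical tenant, schema, and table names
        byte[] key = SchemaUtil.getTableKey(Bytes.toBytes("tenant1"), Bytes.toBytes("MY_SCHEMA"), Bytes.toBytes("MY_TABLE"));
        byte[][] parts = new byte[3][];
        // Splits on the \0 separators into [tenantId, schemaName, tableName]
        SchemaUtil.getVarChars(key, parts);
        System.out.println(Bytes.toString(parts[1]) + "." + Bytes.toString(parts[2]));
    }
}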
Use of org.apache.hadoop.hbase.client.Mutation in project phoenix by apache.
The class PhoenixRuntime, method getUncommittedDataIterator:
/**
 * Get the list of uncommitted KeyValues for the connection. Currently used to write a
 * Phoenix-compliant HFile from a map/reduce job.
 * @param conn an open JDBC connection
 * @param includeMutableIndexes whether to include mutations for mutable indexes as well
 * @return the list of HBase mutations for uncommitted data
 * @throws SQLException
 */
public static Iterator<Pair<byte[], List<KeyValue>>> getUncommittedDataIterator(Connection conn, boolean includeMutableIndexes) throws SQLException {
    final PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    final Iterator<Pair<byte[], List<Mutation>>> iterator = pconn.getMutationState().toMutations(includeMutableIndexes);
    return new Iterator<Pair<byte[], List<KeyValue>>>() {

        @Override
        public boolean hasNext() {
            return iterator.hasNext();
        }

        @Override
        public Pair<byte[], List<KeyValue>> next() {
            Pair<byte[], List<Mutation>> pair = iterator.next();
            // Guess-timate 5 key values per row
            List<KeyValue> keyValues = Lists.newArrayListWithExpectedSize(pair.getSecond().size() * 5);
            for (Mutation mutation : pair.getSecond()) {
                for (List<Cell> keyValueList : mutation.getFamilyCellMap().values()) {
                    for (Cell keyValue : keyValueList) {
                        keyValues.add(org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(keyValue));
                    }
                }
            }
            Collections.sort(keyValues, pconn.getKeyValueBuilder().getKeyValueComparator());
            return new Pair<byte[], List<KeyValue>>(pair.getFirst(), keyValues);
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}
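A hedged usage sketch: after running UPSERTs with auto-commit off, the iterator exposes the pending cells per table without ever committing them. The JDBC URL, table, and columns below are hypothetical:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;
import java.util.Iterator;
import java.util.List;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.phoenix.util.PhoenixRuntime;

public class UncommittedDataSketch {

    public static void main(String[] args) throws Exception {
        Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
        conn.setAutoCommit(false);
        try {
            Statement stmt = conn.createStatement();
            stmt.executeUpdate("UPSERT INTO MY_TABLE (K, V) VALUES ('a', 1)");
            Iterator<Pair<byte[], List<KeyValue>>> it = PhoenixRuntime.getUncommittedDataIterator(conn, true);
            while (it.hasNext()) {
                Pair<byte[], List<KeyValue>> tablePair = it.next();
                // first = physical table name, second = sorted KeyValues for that table
                System.out.println(Bytes.toString(tablePair.getFirst()) + ": " + tablePair.getSecond().size() + " cells");
            }
        } finally {
            // Roll back so nothing is actually written
            conn.rollback();
            conn.close();
        }
    }
}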