Example use of org.apache.accumulo.core.data.Mutation in the Apache Hive project: the createAccumuloTable method of the AccumuloTestSetup class.
/**
 * Creates (or recreates) the test table and populates it with three rows covering
 * every supported column type: boolean, byte, short, int, long, string, float,
 * double, decimal, date, and timestamp. Values are written as their string
 * representations under column family "cf", one qualifier per type.
 *
 * @param conn connector to the Accumulo instance
 * @throws TableExistsException if the table is created concurrently
 * @throws TableNotFoundException if the table vanishes between create and write
 * @throws AccumuloException on general Accumulo failure
 * @throws AccumuloSecurityException if the user lacks permission
 */
protected void createAccumuloTable(Connector conn) throws TableExistsException, TableNotFoundException, AccumuloException, AccumuloSecurityException {
    TableOperations tops = conn.tableOperations();
    // Drop any leftover table from a previous run so the fixture starts clean.
    if (tops.exists(TABLE_NAME)) {
        tops.delete(TABLE_NAME);
    }
    tops.create(TABLE_NAME);
    boolean[] booleans = new boolean[] { true, false, true };
    byte[] bytes = new byte[] { Byte.MIN_VALUE, -1, Byte.MAX_VALUE };
    short[] shorts = new short[] { Short.MIN_VALUE, -1, Short.MAX_VALUE };
    int[] ints = new int[] { Integer.MIN_VALUE, -1, Integer.MAX_VALUE };
    long[] longs = new long[] { Long.MIN_VALUE, -1, Long.MAX_VALUE };
    String[] strings = new String[] { "Hadoop, Accumulo", "Hive", "Test Strings" };
    float[] floats = new float[] { Float.MIN_VALUE, -1.0F, Float.MAX_VALUE };
    double[] doubles = new double[] { Double.MIN_VALUE, -1.0, Double.MAX_VALUE };
    HiveDecimal[] decimals = new HiveDecimal[] { HiveDecimal.create("3.14159"), HiveDecimal.create("2.71828"), HiveDecimal.create("0.57721") };
    Date[] dates = new Date[] { Date.valueOf("2014-01-01"), Date.valueOf("2014-03-01"), Date.valueOf("2014-05-01") };
    Timestamp[] timestamps = new Timestamp[] { new Timestamp(50), new Timestamp(100), new Timestamp(150) };
    BatchWriter bw = conn.createBatchWriter(TABLE_NAME, new BatchWriterConfig());
    final String cf = "cf";
    try {
        for (int i = 0; i < 3; i++) {
            Mutation m = new Mutation("key-" + i);
            m.put(cf, "cq-boolean", Boolean.toString(booleans[i]));
            // Fix: getBytes() with no argument uses the platform-default charset;
            // specify UTF-8 explicitly so the stored bytes are deterministic.
            m.put(cf.getBytes(java.nio.charset.StandardCharsets.UTF_8),
                "cq-byte".getBytes(java.nio.charset.StandardCharsets.UTF_8),
                new byte[] { bytes[i] });
            m.put(cf, "cq-short", Short.toString(shorts[i]));
            m.put(cf, "cq-int", Integer.toString(ints[i]));
            m.put(cf, "cq-long", Long.toString(longs[i]));
            m.put(cf, "cq-string", strings[i]);
            m.put(cf, "cq-float", Float.toString(floats[i]));
            m.put(cf, "cq-double", Double.toString(doubles[i]));
            m.put(cf, "cq-decimal", decimals[i].toString());
            m.put(cf, "cq-date", dates[i].toString());
            m.put(cf, "cq-timestamp", timestamps[i].toString());
            bw.addMutation(m);
        }
    } finally {
        // Close flushes pending mutations even if a write above failed.
        bw.close();
    }
}
Example use of org.apache.accumulo.core.data.Mutation in the Presto project (prestodb): the addIndexMutation method of the Indexer class.
/**
 * Writes one index entry (row value -> data row ID) to the index table and bumps
 * the cardinality counter for that (value, family, visibility) combination.
 *
 * @param row index row: the column value being indexed
 * @param family index column family for the indexed column
 * @param visibility visibility label to apply to the index entry
 * @param qualifier index column qualifier: the row ID of the data row
 */
private void addIndexMutation(ByteBuffer row, ByteBuffer family, ColumnVisibility visibility, byte[] qualifier) {
    // NOTE(review): ByteBuffer.array() returns the whole backing array and ignores
    // position/limit — assumes these buffers exactly wrap their arrays; confirm at call sites.
    Mutation indexMutation = new Mutation(row.array());
    indexMutation.put(family.array(), qualifier, visibility, EMPTY_BYTES);
    try {
        indexWriter.addMutation(indexMutation);
    } catch (MutationsRejectedException e) {
        throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Index mutation rejected by server", e);
    }
    // Increment the cardinality metric for this index value. computeIfAbsent replaces
    // the original get/null-check/put sequence: it is the idiomatic Map API for this
    // pattern and is atomic when `metrics` is a concurrent map (the original
    // check-then-put could lose counts under concurrent access).
    MetricsKey key = new MetricsKey(row, family, visibility);
    metrics.computeIfAbsent(key, k -> new AtomicLong(0)).incrementAndGet();
}
Example use of org.apache.accumulo.core.data.Mutation in the Presto project (prestodb): the setupClass method of the TestIndexer class.
@BeforeClass
public void setupClass() throws Exception {
    // Column handles for the test table: a row-ID column plus three indexed
    // columns ("age", "firstname", "arr") all in family "cf".
    AccumuloColumnHandle idColumn = new AccumuloColumnHandle("id", Optional.empty(), Optional.empty(), VARCHAR, 0, "", false);
    AccumuloColumnHandle ageColumn = new AccumuloColumnHandle("age", Optional.of("cf"), Optional.of("age"), BIGINT, 1, "", true);
    AccumuloColumnHandle firstNameColumn = new AccumuloColumnHandle("firstname", Optional.of("cf"), Optional.of("firstname"), VARCHAR, 2, "", true);
    AccumuloColumnHandle arrayColumn = new AccumuloColumnHandle("arr", Optional.of("cf"), Optional.of("arr"), new ArrayType(VARCHAR), 3, "", true);
    table = new AccumuloTable("default", "index_test_table", ImmutableList.of(idColumn, ageColumn, firstNameColumn, arrayColumn), "id", true, LexicoderRowSerializer.class.getCanonicalName(), null);

    // Two data mutations without visibility labels.
    m1 = new Mutation(M1_ROWID);
    m1.put(CF, AGE, AGE_VALUE);
    m1.put(CF, FIRSTNAME, M1_FNAME_VALUE);
    m1.put(CF, SENDERS, M1_ARR_VALUE);

    m2 = new Mutation(M2_ROWID);
    m2.put(CF, AGE, AGE_VALUE);
    m2.put(CF, FIRSTNAME, M2_FNAME_VALUE);
    m2.put(CF, SENDERS, M2_ARR_VALUE);

    // The same two mutations again, this time with visibility labels attached
    // so visibility-aware indexing can be exercised.
    ColumnVisibility privateLabel = new ColumnVisibility("private");
    ColumnVisibility morePrivateLabel = new ColumnVisibility("moreprivate");

    m1v = new Mutation(M1_ROWID);
    m1v.put(CF, AGE, privateLabel, AGE_VALUE);
    m1v.put(CF, FIRSTNAME, privateLabel, M1_FNAME_VALUE);
    m1v.put(CF, SENDERS, morePrivateLabel, M1_ARR_VALUE);

    m2v = new Mutation(M2_ROWID);
    m2v.put(CF, AGE, privateLabel, AGE_VALUE);
    m2v.put(CF, FIRSTNAME, morePrivateLabel, M2_FNAME_VALUE);
    m2v.put(CF, SENDERS, morePrivateLabel, M2_ARR_VALUE);
}
Example use of org.apache.accumulo.core.data.Mutation in the Presto project (prestodb): the deserializeData method of the AbstractTestAccumuloRowSerializer class.
/**
 * Feeds a serialized value back through the serializer as if it were a scan
 * result: builds a Key/Value entry for row "row", family "a", qualifier "b",
 * maps COLUMN_NAME to that family/qualifier, and deserializes the entry.
 *
 * Fix: the original also constructed a {@code Mutation} and put the data into
 * it, but the mutation was never written anywhere — dead code, removed.
 *
 * @param serializer serializer under test
 * @param data serialized cell value to deserialize
 * @throws Exception if deserialization fails
 */
protected void deserializeData(AccumuloRowSerializer serializer, byte[] data) throws Exception {
    Key key = new Key(b("row"), b("a"), b("b"), b(), 0, false);
    Value value = new Value(data);
    serializer.setMapping(COLUMN_NAME, "a", "b");
    serializer.deserialize(new SimpleImmutableEntry<>(key, value));
}
Example use of org.apache.accumulo.core.data.Mutation in the YCSB project (brianfrankcooper): the deleteRow method of the AccumuloClient class.
/**
 * Deletes a row, given a Scanner of JUST that row.
 *
 * @param scanner scanner positioned over the single row to delete
 * @throws MutationsRejectedException if the batch writer rejects the delete mutation
 */
private void deleteRow(Scanner scanner) throws MutationsRejectedException {
    Mutation deleter = null;
    // Iterate through the row's cells, adding a delete marker for each one.
    for (Entry<Key, Value> entry : scanner) {
        if (deleter == null) {
            // Every entry shares the same row, so one mutation covers the whole row.
            deleter = new Mutation(entry.getKey().getRow());
        }
        // putDelete adds the key with the delete flag set to true.
        deleter.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
    }
    // Fix: the original called bw.addMutation(deleter) unconditionally; when the
    // scanner returns no entries (row already absent), deleter is still null and
    // addMutation throws. Skip the write when there is nothing to delete.
    if (deleter != null) {
        bw.addMutation(deleter);
    }
}
Aggregations