
Example 11 with ServerMutation

Use of org.apache.accumulo.server.data.ServerMutation in project accumulo by apache.

From the class SortedLogRecoveryTest, method testSkipSuccessfulCompactionAcrossFiles.

@Test
public void testSkipSuccessfulCompactionAcrossFiles() throws IOException {
    // Create a test log
    Mutation ignored = new ServerMutation(new Text("ignored"));
    ignored.put(cf, cq, value);
    Mutation m = new ServerMutation(new Text("row1"));
    m.put(cf, cq, value);
    KeyValue[] entries = new KeyValue[] {
        createKeyValue(OPEN, 0, -1, "1"),
        createKeyValue(DEFINE_TABLET, 1, 1, extent),
        createKeyValue(COMPACTION_START, 3, 1, "/t1/f1"),
        createKeyValue(MUTATION, 2, 1, ignored) };
    KeyValue[] entries2 = new KeyValue[] {
        createKeyValue(OPEN, 4, -1, "1"),
        createKeyValue(DEFINE_TABLET, 5, 1, extent),
        createKeyValue(COMPACTION_FINISH, 6, 1, null),
        createKeyValue(MUTATION, 7, 1, m) };
    Map<String, KeyValue[]> logs = new TreeMap<>();
    logs.put("entries", entries);
    logs.put("entries2", entries2);
    // Recover
    List<Mutation> mutations = recover(logs, extent);
    // Verify recovered data
    Assert.assertEquals(1, mutations.size());
    Assert.assertEquals(m, mutations.get(0));
}
Also used: ServerMutation(org.apache.accumulo.server.data.ServerMutation) Text(org.apache.hadoop.io.Text) Mutation(org.apache.accumulo.core.data.Mutation) TreeMap(java.util.TreeMap) Test(org.junit.Test)
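The createKeyValue and recover helpers used in this test and the next are private members of SortedLogRecoveryTest and are not part of this excerpt. Below is a minimal, hypothetical sketch of what a createKeyValue-style helper could look like, assuming a test-local KeyValue holder that pairs a LogFileKey with a LogFileValue; the class name, field handling, and switch cases are assumptions, not the project's actual code.

import java.util.Arrays;

import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.impl.KeyExtent;
import org.apache.accumulo.tserver.logger.LogEvents;
import org.apache.accumulo.tserver.logger.LogFileKey;
import org.apache.accumulo.tserver.logger.LogFileValue;

// Hypothetical test-local holder pairing a log key with its value.
class KeyValue {
    final LogFileKey key = new LogFileKey();
    final LogFileValue value = new LogFileValue();
}

class KeyValueSketch {
    // Sketch of a createKeyValue-like helper: fills in the fields the given
    // event type cares about and leaves the rest at their defaults.
    static KeyValue createKeyValue(LogEvents type, long seq, int tid, Object fileExtentMutation) {
        KeyValue result = new KeyValue();
        result.key.event = type;
        result.key.seq = seq;
        result.key.tid = tid;
        switch (type) {
            case OPEN:
                result.key.tserverSession = (String) fileExtentMutation;
                break;
            case COMPACTION_START:
                result.key.filename = (String) fileExtentMutation;
                break;
            case DEFINE_TABLET:
                result.key.tablet = (KeyExtent) fileExtentMutation;
                break;
            case MUTATION:
                result.value.mutations = Arrays.asList((Mutation) fileExtentMutation);
                break;
            default:
                // Other events (e.g. COMPACTION_FINISH) carry no payload that
                // these recovery tests inspect.
                break;
        }
        return result;
    }
}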

Example 12 with ServerMutation

Use of org.apache.accumulo.server.data.ServerMutation in project accumulo by apache.

From the class SortedLogRecoveryTest, method testBug1.

@Test
public void testBug1() throws IOException {
    // This unit test reproduces a previously observed bug; nothing should be recovered.
    Mutation m1 = new ServerMutation(new Text("row1"));
    m1.put(cf, cq, value);
    Mutation m2 = new ServerMutation(new Text("row2"));
    m2.put(cf, cq, value);
    KeyValue[] entries = new KeyValue[] {
        createKeyValue(OPEN, 0, -1, "1"),
        createKeyValue(DEFINE_TABLET, 1, 1, extent),
        createKeyValue(COMPACTION_START, 30, 1, "/t1/f1"),
        createKeyValue(COMPACTION_FINISH, 32, 1, "/t1/f1"),
        createKeyValue(MUTATION, 29, 1, m1),
        createKeyValue(MUTATION, 30, 1, m2) };
    Map<String, KeyValue[]> logs = new TreeMap<>();
    logs.put("testlog", entries);
    // Recover
    List<Mutation> mutations = recover(logs, extent);
    // Verify recovered data
    Assert.assertEquals(0, mutations.size());
}
Also used: ServerMutation(org.apache.accumulo.server.data.ServerMutation) Text(org.apache.hadoop.io.Text) Mutation(org.apache.accumulo.core.data.Mutation) TreeMap(java.util.TreeMap) Test(org.junit.Test)

Example 13 with ServerMutation

Use of org.apache.accumulo.server.data.ServerMutation in project accumulo by apache.

From the class LogFileTest, method testReadFields.

@Test
public void testReadFields() throws IOException {
    LogFileKey key = new LogFileKey();
    LogFileValue value = new LogFileValue();
    key.tserverSession = "";
    readWrite(OPEN, -1, -1, null, null, null, key, value);
    assertEquals(key.event, OPEN);
    readWrite(COMPACTION_FINISH, 1, 2, null, null, null, key, value);
    assertEquals(key.event, COMPACTION_FINISH);
    assertEquals(key.seq, 1);
    assertEquals(key.tid, 2);
    readWrite(COMPACTION_START, 3, 4, "some file", null, null, key, value);
    assertEquals(key.event, COMPACTION_START);
    assertEquals(key.seq, 3);
    assertEquals(key.tid, 4);
    assertEquals(key.filename, "some file");
    KeyExtent tablet = new KeyExtent(Table.ID.of("table"), new Text("bbbb"), new Text("aaaa"));
    readWrite(DEFINE_TABLET, 5, 6, null, tablet, null, key, value);
    assertEquals(key.event, DEFINE_TABLET);
    assertEquals(key.seq, 5);
    assertEquals(key.tid, 6);
    assertEquals(key.tablet, tablet);
    Mutation m = new ServerMutation(new Text("row"));
    m.put(new Text("cf"), new Text("cq"), new Value("value".getBytes()));
    readWrite(MUTATION, 7, 8, null, null, new Mutation[] { m }, key, value);
    assertEquals(key.event, MUTATION);
    assertEquals(key.seq, 7);
    assertEquals(key.tid, 8);
    assertEquals(value.mutations, Arrays.asList(m));
    m = new ServerMutation(new Text("row"));
    m.put(new Text("cf"), new Text("cq"), new ColumnVisibility("vis"), 12345, new Value("value".getBytes()));
    m.put(new Text("cf"), new Text("cq"), new ColumnVisibility("vis2"), new Value("value".getBytes()));
    m.putDelete(new Text("cf"), new Text("cq"), new ColumnVisibility("vis2"));
    readWrite(MUTATION, 8, 9, null, null, new Mutation[] { m }, key, value);
    assertEquals(key.event, MUTATION);
    assertEquals(key.seq, 8);
    assertEquals(key.tid, 9);
    assertEquals(value.mutations, Arrays.asList(m));
    readWrite(MANY_MUTATIONS, 9, 10, null, null, new Mutation[] { m, m }, key, value);
    assertEquals(key.event, MANY_MUTATIONS);
    assertEquals(key.seq, 9);
    assertEquals(key.tid, 10);
    assertEquals(value.mutations, Arrays.asList(m, m));
}
Also used: Value(org.apache.accumulo.core.data.Value) ServerMutation(org.apache.accumulo.server.data.ServerMutation) Text(org.apache.hadoop.io.Text) Mutation(org.apache.accumulo.core.data.Mutation) ColumnVisibility(org.apache.accumulo.core.security.ColumnVisibility) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) Test(org.junit.Test)
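The readWrite helper is private to LogFileTest and is not shown in this excerpt. Since LogFileKey and LogFileValue are Hadoop Writables (Example 14 below calls key.write(dos) and value.write(dos) directly), a plausible sketch of the serialization round trip at the core of such a helper might look like the following; the class and method names here are assumptions, not the test's actual code.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.accumulo.tserver.logger.LogFileKey;
import org.apache.accumulo.tserver.logger.LogFileValue;

class LogFileRoundTripSketch {
    // Serialize an already-populated key/value pair, then read it back into
    // the same objects so the caller's assertions compare deserialized fields.
    static void roundTrip(LogFileKey key, LogFileValue value) throws IOException {
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try (DataOutputStream dos = new DataOutputStream(baos)) {
            key.write(dos);
            value.write(dos);
        }
        try (DataInputStream dis = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()))) {
            key.readFields(dis);
            value.readFields(dis);
        }
    }
}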

Example 14 with ServerMutation

Use of org.apache.accumulo.server.data.ServerMutation in project accumulo by apache.

From the class AccumuloReplicaSystemTest, method restartInFileKnowsAboutPreviousTableDefines.

@Test
public void restartInFileKnowsAboutPreviousTableDefines() throws Exception {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(baos);
    LogFileKey key = new LogFileKey();
    LogFileValue value = new LogFileValue();
    // What is seq used for?
    key.seq = 1L;
    /*
     * Disclaimer: the following series of LogFileKey and LogFileValue pairs has no bearing whatsoever on what these entries would actually look like in a
     * WAL. They exist solely to test that each LogEvents value is handled; order is not important.
     */
    key.event = LogEvents.DEFINE_TABLET;
    key.tablet = new KeyExtent(Table.ID.of("1"), null, null);
    key.tid = 1;
    key.write(dos);
    value.write(dos);
    key.tablet = null;
    key.event = LogEvents.MUTATION;
    key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    value.mutations = Arrays.asList(new ServerMutation(new Text("row")));
    key.write(dos);
    value.write(dos);
    key.tablet = null;
    key.event = LogEvents.MUTATION;
    key.tid = 1;
    key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
    value.mutations = Arrays.asList(new ServerMutation(new Text("row")));
    key.write(dos);
    value.write(dos);
    dos.close();
    Map<String, String> confMap = new HashMap<>();
    confMap.put(Property.REPLICATION_NAME.getKey(), "source");
    AccumuloConfiguration conf = new ConfigurationCopy(confMap);
    AccumuloReplicaSystem ars = new AccumuloReplicaSystem();
    ars.setConf(conf);
    Status status = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true).setClosed(false).build();
    DataInputStream dis = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
    HashSet<Integer> tids = new HashSet<>();
    // Only consume the first mutation, not the second
    WalReplication repl = ars.getWalEdits(new ReplicationTarget("peer", "1", Table.ID.of("1")), dis, new Path("/accumulo/wals/tserver+port/wal"), status, 1L, tids);
    // We stopped because we got to the end of the file
    Assert.assertEquals(2, repl.entriesConsumed);
    Assert.assertEquals(1, repl.walEdits.getEditsSize());
    Assert.assertEquals(1, repl.sizeInRecords);
    Assert.assertNotEquals(0, repl.sizeInBytes);
    status = Status.newBuilder(status).setBegin(2).build();
    // Consume the rest of the mutations
    repl = ars.getWalEdits(new ReplicationTarget("peer", "1", Table.ID.of("1")), dis, new Path("/accumulo/wals/tserver+port/wal"), status, 1L, tids);
    // We stopped because we got to the end of the file
    Assert.assertEquals(1, repl.entriesConsumed);
    Assert.assertEquals(1, repl.walEdits.getEditsSize());
    Assert.assertEquals(1, repl.sizeInRecords);
    Assert.assertNotEquals(0, repl.sizeInBytes);
}
Also used: Status(org.apache.accumulo.server.replication.proto.Replication.Status) Path(org.apache.hadoop.fs.Path) ConfigurationCopy(org.apache.accumulo.core.conf.ConfigurationCopy) HashMap(java.util.HashMap) DataOutputStream(java.io.DataOutputStream) WalReplication(org.apache.accumulo.tserver.replication.AccumuloReplicaSystem.WalReplication) ServerMutation(org.apache.accumulo.server.data.ServerMutation) Text(org.apache.hadoop.io.Text) ByteArrayOutputStream(java.io.ByteArrayOutputStream) LogFileKey(org.apache.accumulo.tserver.logger.LogFileKey) DataInputStream(java.io.DataInputStream) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget) ByteArrayInputStream(java.io.ByteArrayInputStream) LogFileValue(org.apache.accumulo.tserver.logger.LogFileValue) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration) HashSet(java.util.HashSet) Test(org.junit.Test)

Example 15 with ServerMutation

Use of org.apache.accumulo.server.data.ServerMutation in project accumulo by apache.

From the class TabletTime, method setSystemTimes.

protected void setSystemTimes(Mutation mutation, long lastCommitTime) {
    ServerMutation m = (ServerMutation) mutation;
    m.setSystemTimestamp(lastCommitTime);
}
Also used: ServerMutation(org.apache.accumulo.server.data.ServerMutation)
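For context, here is a minimal usage sketch of the pattern above, assuming a caller that builds the mutation and supplies the commit time itself; everything outside the setSystemTimestamp call is an assumption, not Accumulo's actual call site.

import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.server.data.ServerMutation;
import org.apache.hadoop.io.Text;

class SystemTimeSketch {
    public static void main(String[] args) {
        // Updates arrive without an explicit timestamp; the tablet server
        // stamps them with a server-assigned time before they are committed.
        Mutation m = new ServerMutation(new Text("row1"));
        m.put(new Text("cf"), new Text("cq"), new Value("v".getBytes()));

        // Mirrors TabletTime.setSystemTimes above: only a ServerMutation can
        // carry the server-assigned timestamp, hence the cast.
        long lastCommitTime = System.currentTimeMillis(); // assumption: a plausible commit time
        ((ServerMutation) m).setSystemTimestamp(lastCommitTime);
    }
}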

Aggregations

ServerMutation (org.apache.accumulo.server.data.ServerMutation): 30
Test (org.junit.Test): 26
Mutation (org.apache.accumulo.core.data.Mutation): 24
Text (org.apache.hadoop.io.Text): 22
TreeMap (java.util.TreeMap): 17
LogFileKey (org.apache.accumulo.tserver.logger.LogFileKey): 7
LogFileValue (org.apache.accumulo.tserver.logger.LogFileValue): 7
DataOutputStream (java.io.DataOutputStream): 6
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 5
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 5
DataInputStream (java.io.DataInputStream): 4
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 4
Value (org.apache.accumulo.core.data.Value): 4
Status (org.apache.accumulo.server.replication.proto.Replication.Status): 4
Path (org.apache.hadoop.fs.Path): 4
ByteArrayInputStream (java.io.ByteArrayInputStream): 3
HashMap (java.util.HashMap): 3
AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration): 3
ConfigurationCopy (org.apache.accumulo.core.conf.ConfigurationCopy): 3
ReplicationTarget (org.apache.accumulo.core.replication.ReplicationTarget): 3