Example use of org.apache.accumulo.core.dataImpl.KeyExtent in the Apache Accumulo project:
class SortedLogRecoveryTest, method testMultipleTablets.
@Test
public void testMultipleTablets() throws IOException {
  // Two tablets of table "1" split at row "m": extent1 = (-inf, m], extent2 = (m, +inf).
  KeyExtent extent1 = new KeyExtent(TableId.of("1"), new Text("m"), null);
  KeyExtent extent2 = new KeyExtent(TableId.of("1"), null, new Text("m"));

  Mutation m1 = new ServerMutation(new Text("b"));
  m1.put("f1", "q1", "v1");
  Mutation m2 = new ServerMutation(new Text("b"));
  m2.put("f1", "q2", "v2");
  Mutation m3 = new ServerMutation(new Text("s"));
  m3.put("f1", "q1", "v3");
  Mutation m4 = new ServerMutation(new Text("s"));
  m4.put("f1", "q2", "v4");

  // A single log with interleaved entries for two tablets (tablet ids 10 and 11).
  // Tablet 10's compaction both starts and finishes; tablet 11's only starts.
  KeyValue[] firstLog = {
      createKeyValue(OPEN, 0, -1, "1"),
      createKeyValue(DEFINE_TABLET, 7, 10, extent1),
      createKeyValue(DEFINE_TABLET, 5, 11, extent2),
      createKeyValue(MUTATION, 8, 10, m1),
      createKeyValue(COMPACTION_START, 9, 10, "/t/f1"),
      createKeyValue(MUTATION, 10, 10, m2),
      createKeyValue(COMPACTION_FINISH, 10, 10, null),
      createKeyValue(MUTATION, 6, 11, m3),
      createKeyValue(COMPACTION_START, 7, 11, "/t/f2"),
      createKeyValue(MUTATION, 8, 11, m4)};
  Arrays.sort(firstLog);

  Map<String,KeyValue[]> logs = new TreeMap<>();
  logs.put("entries1", firstLog);

  // Tablet 10: only m2, written after the finished compaction, is recovered.
  List<Mutation> recovered1 = recover(logs, extent1);
  assertEquals(1, recovered1.size());
  assertEquals(m2, recovered1.get(0));

  // Tablet 11: its compaction never finished, so both of its mutations are recovered.
  reset(context);
  List<Mutation> recovered2 = recover(logs, extent2);
  assertEquals(2, recovered2.size());
  assertEquals(m3, recovered2.get(0));
  assertEquals(m4, recovered2.get(1));

  // A second log records that tablet 11's compaction finished; after that only m4
  // (seq 8, past the finish at seq 8/DEFINE at 9) is recovered.
  KeyValue[] secondLog = {
      createKeyValue(OPEN, 0, -1, "1"),
      createKeyValue(DEFINE_TABLET, 9, 11, extent2),
      createKeyValue(COMPACTION_FINISH, 8, 11, null)};
  Arrays.sort(secondLog);
  logs.put("entries2", secondLog);

  reset(context);
  recovered2 = recover(logs, extent2);
  assertEquals(1, recovered2.size());
  assertEquals(m4, recovered2.get(0));
}
Example use of org.apache.accumulo.core.dataImpl.KeyExtent in the Apache Accumulo project:
class LogFileTest, method testReadFields.
@Test
public void testReadFields() throws IOException {
  // Round-trips each LogEvents type through readWrite and verifies the fields
  // that survive serialization. NOTE: the original asserts were written as
  // assertEquals(actual, expected); the JUnit convention is (expected, actual),
  // fixed here so failure messages report the right way around.
  LogFileKey key = new LogFileKey();
  LogFileValue value = new LogFileValue();
  key.tserverSession = "";

  // OPEN carries no seq/tabletId payload worth checking beyond the event.
  readWrite(OPEN, -1, -1, null, null, null, key, value);
  assertEquals(OPEN, key.event);

  readWrite(COMPACTION_FINISH, 1, 2, null, null, null, key, value);
  assertEquals(COMPACTION_FINISH, key.event);
  assertEquals(1, key.seq);
  assertEquals(2, key.tabletId);

  // COMPACTION_START additionally round-trips the compacted file name.
  readWrite(COMPACTION_START, 3, 4, "some file", null, null, key, value);
  assertEquals(COMPACTION_START, key.event);
  assertEquals(3, key.seq);
  assertEquals(4, key.tabletId);
  assertEquals("some file", key.filename);

  // DEFINE_TABLET round-trips the tablet's KeyExtent.
  KeyExtent tablet = new KeyExtent(TableId.of("table"), new Text("bbbb"), new Text("aaaa"));
  readWrite(DEFINE_TABLET, 5, 6, null, tablet, null, key, value);
  assertEquals(DEFINE_TABLET, key.event);
  assertEquals(5, key.seq);
  assertEquals(6, key.tabletId);
  assertEquals(tablet, key.tablet);

  // MUTATION round-trips a single simple mutation in the value.
  Mutation m = new ServerMutation(new Text("row"));
  m.put("cf", "cq", "value");
  readWrite(MUTATION, 7, 8, null, null, new Mutation[] {m}, key, value);
  assertEquals(MUTATION, key.event);
  assertEquals(7, key.seq);
  assertEquals(8, key.tabletId);
  assertEquals(Arrays.asList(m), value.mutations);

  // A mutation with visibilities, an explicit timestamp, and a delete marker.
  m = new ServerMutation(new Text("row"));
  m.put(new Text("cf"), new Text("cq"), new ColumnVisibility("vis"), 12345, new Value("value"));
  m.put(new Text("cf"), new Text("cq"), new ColumnVisibility("vis2"), new Value("value"));
  m.putDelete(new Text("cf"), new Text("cq"), new ColumnVisibility("vis2"));
  readWrite(MUTATION, 8, 9, null, null, new Mutation[] {m}, key, value);
  assertEquals(MUTATION, key.event);
  assertEquals(8, key.seq);
  assertEquals(9, key.tabletId);
  assertEquals(Arrays.asList(m), value.mutations);

  // MANY_MUTATIONS preserves multiple mutations, including duplicates, in order.
  readWrite(MANY_MUTATIONS, 9, 10, null, null, new Mutation[] {m, m}, key, value);
  assertEquals(MANY_MUTATIONS, key.event);
  assertEquals(9, key.seq);
  assertEquals(10, key.tabletId);
  assertEquals(Arrays.asList(m, m), value.mutations);
}
Example use of org.apache.accumulo.core.dataImpl.KeyExtent in the Apache Accumulo project:
class AccumuloReplicaSystemTest, method restartInFileKnowsAboutPreviousTableDefines.
@Test
public void restartInFileKnowsAboutPreviousTableDefines() throws Exception {
// Serialize a synthetic WAL into a byte buffer: one DEFINE_TABLET followed by
// two MUTATION entries, then replay it through getWalEdits in two passes to
// verify the second pass still honors the DEFINE_TABLET consumed in the first.
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream dos = new DataOutputStream(baos);
LogFileKey key = new LogFileKey();
LogFileValue value = new LogFileValue();
// What is seq used for?
key.seq = 1L;
/*
 * Disclaimer: the following series of LogFileKey and LogFileValue pairs have *no* bearing
 * whatsoever in reality regarding what these entries would actually look like in a WAL. They
 * are solely for testing that each LogEvents is handled, order is not important.
 */
// Entry 1: define tablet id 1 for table "1" (full-range extent).
key.event = LogEvents.DEFINE_TABLET;
key.tablet = new KeyExtent(TableId.of("1"), null, null);
key.tabletId = 1;
key.write(dos);
value.write(dos);
// Entry 2: a mutation; tabletId is still 1 (carried over from the DEFINE above).
key.tablet = null;
key.event = LogEvents.MUTATION;
key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
value.mutations = Arrays.asList(new ServerMutation(new Text("row")));
key.write(dos);
value.write(dos);
// Entry 3: a second mutation for the same tablet id, with a fresh filename.
key.tablet = null;
key.event = LogEvents.MUTATION;
key.tabletId = 1;
key.filename = "/accumulo/wals/tserver+port/" + UUID.randomUUID();
value.mutations = Arrays.asList(new ServerMutation(new Text("row")));
key.write(dos);
value.write(dos);
dos.close();
// Minimal replica-system configuration: only the replication source name is needed.
Map<String, String> confMap = new HashMap<>();
confMap.put(Property.REPLICATION_NAME.getKey(), "source");
AccumuloConfiguration conf = new ConfigurationCopy(confMap);
AccumuloReplicaSystem ars = new AccumuloReplicaSystem();
ars.setConf(conf);
// Replication status: start at the beginning, open-ended (infiniteEnd), file not closed.
Status status = Status.newBuilder().setBegin(0).setEnd(0).setInfiniteEnd(true).setClosed(false).build();
DataInputStream dis = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
HashSet<Integer> tids = new HashSet<>();
// Only consume the first mutation, not the second
WalReplication repl = ars.getWalEdits(new ReplicationTarget("peer", "1", TableId.of("1")), dis, new Path("/accumulo/wals/tserver+port/wal"), status, 1L, tids);
// We stopped because we got to the end of the file
// First pass: 2 entries consumed (DEFINE_TABLET + first MUTATION), yielding 1 edit.
assertEquals(2, repl.entriesConsumed);
assertEquals(1, repl.walEdits.getEditsSize());
assertEquals(1, repl.sizeInRecords);
assertNotEquals(0, repl.sizeInBytes);
// Advance begin past the already-replicated entries and re-read from the same stream.
status = Status.newBuilder(status).setBegin(2).build();
// Consume the rest of the mutations
repl = ars.getWalEdits(new ReplicationTarget("peer", "1", TableId.of("1")), dis, new Path("/accumulo/wals/tserver+port/wal"), status, 1L, tids);
// We stopped because we got to the end of the file
// Second pass still produces the edit: the tablet define from the first pass is
// remembered (via tids), so the remaining MUTATION is replicated.
assertEquals(1, repl.entriesConsumed);
assertEquals(1, repl.walEdits.getEditsSize());
assertEquals(1, repl.sizeInRecords);
assertNotEquals(0, repl.sizeInBytes);
}
Example use of org.apache.accumulo.core.dataImpl.KeyExtent in the Apache Accumulo project:
class BasicCompactionStrategyTest, method testLargeCompaction.
@Test
public void testLargeCompaction() {
  // Four 2G files should trigger a compaction of all files using the "large"
  // compression settings.
  ttcs.init(opts);
  conf = DefaultConfiguration.getInstance();
  KeyExtent ke = new KeyExtent(TableId.of("0"), null, null);
  mcr = new MajorCompactionRequest(ke, MajorCompactionReason.NORMAL, conf, getServerContext());
  Map<StoredTabletFile, DataFileValue> fileMap = createFileMap("f1", "2G", "f2", "2G", "f3", "2G", "f4", "2G");
  mcr.setFiles(fileMap);
  assertTrue(ttcs.shouldCompact(mcr));
  assertEquals(4, mcr.getFiles().size());
  // Compute the plan once; the original recomputed it for each assertion.
  var plan = ttcs.getCompactionPlan(mcr);
  List<StoredTabletFile> filesToCompact = plan.inputFiles;
  assertEquals(fileMap.keySet(), new HashSet<>(filesToCompact));
  assertEquals(4, filesToCompact.size());
  assertEquals(largeCompressionType, plan.writeParameters.getCompressType());
}
Example use of org.apache.accumulo.core.dataImpl.KeyExtent in the Apache Accumulo project:
class BasicCompactionStrategyTest, method testFileSubsetCompaction.
@Test
public void testFileSubsetCompaction() {
  // One 1G file plus six 10M files: the strategy should compact only the six
  // small files and leave the large one out, with default write parameters.
  ttcs.init(opts);
  conf = DefaultConfiguration.getInstance();
  KeyExtent ke = new KeyExtent(TableId.of("0"), null, null);
  mcr = new MajorCompactionRequest(ke, MajorCompactionReason.NORMAL, conf, getServerContext());
  Map<StoredTabletFile, DataFileValue> fileMap = createFileMap("f1", "1G", "f2", "10M", "f3", "10M", "f4", "10M", "f5", "10M", "f6", "10M", "f7", "10M");
  Map<StoredTabletFile, DataFileValue> filesToCompactMap = createFileMap("f2", "10M", "f3", "10M", "f4", "10M", "f5", "10M", "f6", "10M", "f7", "10M");
  mcr.setFiles(fileMap);
  assertTrue(ttcs.shouldCompact(mcr));
  assertEquals(7, mcr.getFiles().size());
  // Compute the plan once; the original recomputed it for each assertion.
  var plan = ttcs.getCompactionPlan(mcr);
  List<StoredTabletFile> filesToCompact = plan.inputFiles;
  assertEquals(filesToCompactMap.keySet(), new HashSet<>(filesToCompact));
  assertEquals(6, filesToCompact.size());
  // No write parameters expected when only a subset is compacted.
  assertNull(plan.writeParameters);
}
Aggregations