Use of org.apache.accumulo.server.master.state.MergeInfo in project accumulo by apache.
The class TableRangeOpWait, method call():
@Override
public Repo<Master> call(long tid, Master master) throws Exception {
  MergeInfo mergeInfo = master.getMergeInfo(tableId);
  log.info("removing merge information " + mergeInfo);
  master.clearMergeState(tableId);
  Utils.unreserveTable(tableId, tid, true);
  Utils.unreserveNamespace(namespaceId, tid, false);
  return null;
}
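For context, the clearMergeState(tableId) call above removes merge metadata that was recorded earlier in the merge operation. A minimal sketch of that earlier step, assuming the Master exposes a setMergeState counterpart (the method name, the MergeState constant, and the range variables are assumptions, not shown in this snippet):

// Hypothetical counterpart to master.clearMergeState(tableId) above; names are assumptions.
KeyExtent range = new KeyExtent(tableId, endRow, startRow);            // placeholder row range being merged
MergeInfo mergeInfo = new MergeInfo(range, MergeInfo.Operation.MERGE); // same constructor as in MergeStateIT below
master.setMergeState(mergeInfo, MergeState.STARTED);                   // assumed: records the pending merge for this table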
Use of org.apache.accumulo.server.master.state.MergeInfo in project accumulo by apache.
The class MergeStats, method main():
public static void main(String[] args) throws Exception {
  ClientOpts opts = new ClientOpts();
  opts.parseArgs(MergeStats.class.getName(), args);
  Connector conn = opts.getConnector();
  Map<String, String> tableIdMap = conn.tableOperations().tableIdMap();
  for (Entry<String, String> entry : tableIdMap.entrySet()) {
    final String table = entry.getKey(), tableId = entry.getValue();
    String path = ZooUtil.getRoot(conn.getInstance().getInstanceID()) + Constants.ZTABLES + "/" + tableId + "/merge";
    MergeInfo info = new MergeInfo();
    if (ZooReaderWriter.getInstance().exists(path)) {
      byte[] data = ZooReaderWriter.getInstance().getData(path, new Stat());
      DataInputBuffer in = new DataInputBuffer();
      in.reset(data, data.length);
      info.readFields(in);
    }
    System.out.println(String.format("%25s %10s %10s %s", table, info.getState(), info.getOperation(), info.getExtent()));
  }
}
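The readFields call above deserializes the MergeInfo stored under the table's merge node in ZooKeeper. A minimal sketch of the Hadoop Writable round trip involved, assuming MergeInfo's write(DataOutput) is the counterpart to readFields and using the constructors that appear in the MergeStateIT snippet below (the table id and rows are placeholders):

// Sketch only: serialize a MergeInfo and read it back, mirroring what MergeStats.main decodes.
MergeInfo original = new MergeInfo(new KeyExtent(Table.ID.of("2"), new Text("z"), new Text("a")), MergeInfo.Operation.MERGE);
DataOutputBuffer out = new DataOutputBuffer();      // Hadoop buffer, same package as DataInputBuffer above
original.write(out);                                // assumed Writable counterpart of readFields
MergeInfo copy = new MergeInfo();
DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());           // same reset pattern as in main above
copy.readFields(in);
System.out.println(copy.getState() + " " + copy.getOperation() + " " + copy.getExtent());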
Use of org.apache.accumulo.server.master.state.MergeInfo in project accumulo by apache.
The class MergeStateIT, method test():
@Test
public void test() throws Exception {
  AccumuloServerContext context = EasyMock.createMock(AccumuloServerContext.class);
  Connector connector = getConnector();
  EasyMock.expect(context.getConnector()).andReturn(connector).anyTimes();
  EasyMock.replay(context);
  connector.securityOperations().grantTablePermission(connector.whoami(), MetadataTable.NAME, TablePermission.WRITE);
  BatchWriter bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
  // Create a fake METADATA table with these splits
  String[] splits = { "a", "e", "j", "o", "t", "z" };
  // create metadata for a table "t" with the splits above
  Table.ID tableId = Table.ID.of("t");
  Text pr = null;
  for (String s : splits) {
    Text split = new Text(s);
    Mutation prevRow = KeyExtent.getPrevRowUpdateMutation(new KeyExtent(tableId, split, pr));
    prevRow.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("123456"), new Value("127.0.0.1:1234".getBytes()));
    ChoppedColumnFamily.CHOPPED_COLUMN.put(prevRow, new Value("junk".getBytes()));
    bw.addMutation(prevRow);
    pr = split;
  }
  // Add the default tablet
  Mutation defaultTablet = KeyExtent.getPrevRowUpdateMutation(new KeyExtent(tableId, null, pr));
  defaultTablet.put(TabletsSection.CurrentLocationColumnFamily.NAME, new Text("123456"), new Value("127.0.0.1:1234".getBytes()));
  bw.addMutation(defaultTablet);
  bw.close();
  // Read out the TabletLocationStates
  MockCurrentState state = new MockCurrentState(new MergeInfo(new KeyExtent(tableId, new Text("p"), new Text("e")), MergeInfo.Operation.MERGE));
  // Verify the tablet state: hosted, and count
  MetaDataStateStore metaDataStateStore = new MetaDataStateStore(context, state);
  int count = 0;
  for (TabletLocationState tss : metaDataStateStore) {
    if (tss != null)
      count++;
  }
  // the normal case is to skip tablets in a good state
  Assert.assertEquals(0, count);
  // Create the hole
  // Split the tablet at one end of the range
  Mutation m = new KeyExtent(tableId, new Text("t"), new Text("p")).getPrevRowUpdateMutation();
  TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value("0.5".getBytes()));
  TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(new Text("o")));
  update(connector, m);
  // do the state check
  MergeStats stats = scan(state, metaDataStateStore);
  MergeState newState = stats.nextMergeState(connector, state);
  Assert.assertEquals(MergeState.WAITING_FOR_OFFLINE, newState);
  // unassign the tablets
  BatchDeleter deleter = connector.createBatchDeleter(MetadataTable.NAME, Authorizations.EMPTY, 1000, new BatchWriterConfig());
  deleter.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
  deleter.setRanges(Collections.singletonList(new Range()));
  deleter.delete();
  // now we should be ready to merge, but we have inconsistent metadata
  stats = scan(state, metaDataStateStore);
  Assert.assertEquals(MergeState.WAITING_FOR_OFFLINE, stats.nextMergeState(connector, state));
  // finish the split
  KeyExtent tablet = new KeyExtent(tableId, new Text("p"), new Text("o"));
  m = tablet.getPrevRowUpdateMutation();
  TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value("0.5".getBytes()));
  update(connector, m);
  metaDataStateStore.setLocations(Collections.singletonList(new Assignment(tablet, state.someTServer)));
  // oh no... there's a new tablet online
  stats = scan(state, metaDataStateStore);
  Assert.assertEquals(MergeState.WAITING_FOR_CHOPPED, stats.nextMergeState(connector, state));
  // chop it
  m = tablet.getPrevRowUpdateMutation();
  ChoppedColumnFamily.CHOPPED_COLUMN.put(m, new Value("junk".getBytes()));
  update(connector, m);
  stats = scan(state, metaDataStateStore);
  Assert.assertEquals(MergeState.WAITING_FOR_OFFLINE, stats.nextMergeState(connector, state));
  // take it offline
  m = tablet.getPrevRowUpdateMutation();
  Collection<Collection<String>> walogs = Collections.emptyList();
  metaDataStateStore.unassign(Collections.singletonList(new TabletLocationState(tablet, null, state.someTServer, null, null, walogs, false)), null);
  // now the merge can proceed
  stats = scan(state, metaDataStateStore);
  Assert.assertEquals(MergeState.MERGING, stats.nextMergeState(connector, state));
}
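The scan(...) helper used throughout the test is not shown in this snippet. A minimal sketch of what it plausibly does, assuming MergeStats is constructed from the MergeInfo held by MockCurrentState and exposes an update(...) method that is fed each tablet's location state (field names and signatures below are assumptions, not the verbatim test code):

// Hypothetical reconstruction of the scan(...) helper; signatures are assumptions.
private MergeStats scan(MockCurrentState state, MetaDataStateStore metaDataStateStore) {
  MergeStats stats = new MergeStats(state.mergeInfo);   // assumed: MockCurrentState keeps the MergeInfo it was built with
  for (TabletLocationState tls : metaDataStateStore) {
    // feed each tablet's extent, hosting state, and chopped flag into the stats
    stats.update(tls.extent, tls.getState(state.onlineTabletServers()), tls.chopped, false);
  }
  return stats;
}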