Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.
Class Upgrader9to10, method upgradeDirColumns.
/**
 * Changes how volumes are stored in the metadata and makes Accumulo always call the volume
 * chooser for new tablet files. These changes were made in
 * <a href="https://github.com/apache/accumulo/pull/1389">#1389</a>.
 */
public void upgradeDirColumns(ServerContext context, Ample.DataLevel level) {
  String tableName = level.metaTable();
  // ServerContext implements AccumuloClient, so it can be used directly as a client.
  AccumuloClient c = context;
  try (Scanner scanner = c.createScanner(tableName, Authorizations.EMPTY);
      BatchWriter writer = c.createBatchWriter(tableName)) {
    // Rewrite every directory column entry in the metadata table in place.
    DIRECTORY_COLUMN.fetch(scanner);
    for (Entry<Key, Value> entry : scanner) {
      Mutation m = new Mutation(entry.getKey().getRow());
      DIRECTORY_COLUMN.put(m, new Value(upgradeDirColumn(entry.getValue().toString())));
      writer.addMutation(m);
    }
  } catch (TableNotFoundException | AccumuloException e) {
    throw new RuntimeException(e);
  }
}
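The upgradeDirColumn helper called above is defined elsewhere in Upgrader9to10 and is not part of this snippet. A minimal sketch of what it plausibly does, assuming the upgrade keeps only the final path segment of the old full-URI directory value:

import org.apache.hadoop.fs.Path;

// Sketch: collapse an old absolute dir value such as
// "hdfs://nn:8020/accumulo/tables/1b/t-000008t" to just "t-000008t".
public static String upgradeDirColumn(String dir) {
  return new Path(dir).getName();
}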
Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.
Class Upgrader9to10Test, method normalizeVolume.
@Test
public void normalizeVolume() throws Exception {
  // A deliberately messy volume URI that must be normalized before use.
  String uglyVolume = "hdfs://nn.somewhere.com:86753/accumulo/blah/.././/bad/bad2/../.././/////";
  AccumuloClient c = createMock(AccumuloClient.class);
  VolumeManager fs = createMock(VolumeManager.class);
  expect(fs.exists(anyObject())).andReturn(true).anyTimes();
  // Two relative file entries: one "../<tableId>/..." style and one "/t-..." style.
  SortedMap<Key, Value> map = new TreeMap<>();
  map.put(new Key("1b<", "file", "../1b/t-000008t/F000004x.rf"), new Value("1"));
  map.put(new Key("1b<", "file", "/t-000008t/F0000054.rf"), new Value("2"));
  List<Mutation> results = new ArrayList<>();
  List<Mutation> expected = new ArrayList<>();
  // Both entries should be rewritten as absolute paths under the normalized volume root.
  expected.add(replaceMut("1b<",
      "hdfs://nn.somewhere.com:86753/accumulo/tables/1b/t-000008t/F000004x.rf", "1",
      "../1b/t-000008t/F000004x.rf"));
  expected.add(replaceMut("1b<",
      "hdfs://nn.somewhere.com:86753/accumulo/tables/1b/t-000008t/F0000054.rf", "2",
      "/t-000008t/F0000054.rf"));
  setupMocks(c, fs, map, results);
  Upgrader9to10.replaceRelativePaths(c, fs, tableName, uglyVolume);
  verifyPathsReplaced(expected, results);
}
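The replaceMut test helper is not shown in this snippet. Judging from how it is called (row, new absolute path, value, old relative path), a plausible sketch is a mutation that writes the new "file" entry and deletes the old one, assuming the fluent Mutation.at() API:

// Sketch: one mutation that swaps a relative "file" entry for an absolute one.
// The "file" family comes from the test keys above; the rest is an assumption.
private Mutation replaceMut(String row, String cq, String val, String delete) {
  Mutation m = new Mutation(row);
  m.at().family("file").qualifier(cq).put(val);
  m.at().family("file").qualifier(delete).delete();
  return m;
}

Pairing the put and the delete in a single mutation keeps the replacement atomic per row, which is presumably why expected results are collected as whole mutations.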
Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.
Class Upgrader9to10Test, method missingUpgradeRelativeProperty.
@Test
public void missingUpgradeRelativeProperty() throws Exception {
  AccumuloClient c = createMock(AccumuloClient.class);
  VolumeManager fs = createMock(VolumeManager.class);
  SortedMap<Key, Value> map = new TreeMap<>();
  map.put(new Key("1b;row_000050", "file", "../1b/default_tablet/A000001c.rf"), new Value("1"));
  map.put(new Key("1b;row_000050", "file", "../1b/default_tablet/F000001m.rf"), new Value("2"));
  // The files resolve on no volume, so the upgrade volume property is required.
  expect(fs.exists(anyObject(Path.class))).andReturn(false).anyTimes();
  setupMocks(c, fs, map, new ArrayList<>());
  // An empty upgrade volume must be rejected when relative paths are present.
  assertThrows(IllegalArgumentException.class,
      () -> Upgrader9to10.checkForRelativePaths(c, fs, tableName, ""));
}
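checkForRelativePaths belongs to Upgrader9to10 and is not shown here; what this test pins down is only its contract. A hypothetical distillation of the guard it must contain (requireUpgradeVolume is an illustrative name, not part of the Accumulo API):

// Hypothetical sketch of the guard the test exercises: when a relative file
// path is found and cannot be resolved on any existing volume, an upgrade
// volume must have been configured, otherwise fail fast.
static void requireUpgradeVolume(String relPath, String upgradeVolume) {
  if (upgradeVolume == null || upgradeVolume.isEmpty()) {
    throw new IllegalArgumentException(
        "Relative path " + relPath + " found, but no upgrade volume is configured");
  }
}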
Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.
Class ZooCacheIT, method test.
@Test
public void test() throws Exception {
  // Reset any state left in ZooKeeper and on disk by earlier runs.
  assertEquals(0, exec(CacheTestClean.class, pathName, testDir.getAbsolutePath()).waitFor());
  final AtomicReference<Exception> ref = new AtomicReference<>();
  List<Thread> threads = new ArrayList<>();
  // Start three reader threads, each watching the cache under test.
  for (int i = 0; i < 3; i++) {
    Thread reader = new Thread(() -> {
      try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
        CacheTestReader.main(new String[] {pathName, testDir.getAbsolutePath(),
            ClientInfo.from(client.properties()).getZooKeepers()});
      } catch (Exception ex) {
        ref.set(ex);
      }
    });
    reader.start();
    threads.add(reader);
  }
  // Run the writer, then make sure every reader finished cleanly.
  assertEquals(0,
      exec(CacheTestWriter.class, pathName, testDir.getAbsolutePath(), "3", "50").waitFor());
  for (Thread t : threads) {
    t.join();
    if (ref.get() != null)
      throw ref.get();
  }
}
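The error-handling pattern here (stash a worker exception in an AtomicReference, rethrow after join) is reusable outside Accumulo. A minimal standalone sketch, with a hypothetical work() standing in for CacheTestReader.main:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

public class FanOutJoin {
  // Hypothetical worker body standing in for CacheTestReader.main.
  static void work(int id) throws Exception {
    if (id < 0)
      throw new IllegalStateException("example failure");
  }

  public static void main(String[] args) throws Exception {
    AtomicReference<Exception> first = new AtomicReference<>();
    List<Thread> threads = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
      final int id = i;
      Thread t = new Thread(() -> {
        try {
          work(id);
        } catch (Exception ex) {
          // compareAndSet keeps the first failure instead of the last one.
          first.compareAndSet(null, ex);
        }
      });
      t.start();
      threads.add(t);
    }
    for (Thread t : threads)
      t.join();
    if (first.get() != null)
      throw first.get();
  }
}

One design note: the test above uses a plain set(), so a later failure can overwrite an earlier one; compareAndSet(null, ex) preserves the first exception when several workers fail.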
Use of org.apache.accumulo.core.client.AccumuloClient in project accumulo by apache.
Class SuspendedTabletsIT, method setUp.
@Override
@Before
public void setUp() throws Exception {
  super.setUp();
  try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
    // Wait for all tablet servers to come online and then choose the first server in the
    // list. Update the balancer configuration to assign all metadata tablets to that server
    // (and everything else to other servers).
    InstanceOperations iops = client.instanceOperations();
    List<String> tservers = iops.getTabletServers();
    while (tservers == null || tservers.size() < 1) {
      Thread.sleep(1000L);
      tservers = client.instanceOperations().getTabletServers();
    }
    HostAndPort metadataServer = HostAndPort.fromString(tservers.get(0));
    log.info("Configuring balancer to assign all metadata tablets to {}", metadataServer);
    iops.setProperty(HostRegexTableLoadBalancer.HOST_BALANCER_PREFIX + MetadataTable.NAME,
        metadataServer.toString());
    // Wait for the balancer to assign all metadata tablets to the chosen server.
    ClientContext ctx = (ClientContext) client;
    TabletLocations tl = TabletLocations.retrieve(ctx, MetadataTable.NAME, RootTable.NAME);
    while (tl.hosted.keySet().size() != 1 || !tl.hosted.containsKey(metadataServer)) {
      log.info("Metadata tablets are not hosted on the correct server. Waiting for balancer...");
      Thread.sleep(1000L);
      tl = TabletLocations.retrieve(ctx, MetadataTable.NAME, RootTable.NAME);
    }
    log.info("Metadata tablets are now hosted on {}", metadataServer);
  }
  // Since we started only a single tablet server, we know it's the one hosting the
  // metadata table. Save its process reference off so we can exclude it later when
  // killing tablet servers.
  Collection<ProcessReference> procs = getCluster().getProcesses().get(ServerType.TABLET_SERVER);
  assertEquals("Expected a single tserver process", 1, procs.size());
  metadataTserverProcess = procs.iterator().next();
  // Update the number of tservers and start the new tservers.
  getCluster().getConfig().setNumTservers(TSERVERS);
  getCluster().start();
}
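Both wait loops in setUp follow the same poll-until-condition shape. A small sketch of a helper that could express it once (waitFor is a hypothetical name, not part of the Accumulo test API):

import java.util.function.BooleanSupplier;

// Hypothetical helper: poll a condition once per second until it holds.
static void waitFor(BooleanSupplier condition) throws InterruptedException {
  while (!condition.getAsBoolean()) {
    Thread.sleep(1000L);
  }
}

With it, the first loop could read waitFor(() -> !client.instanceOperations().getTabletServers().isEmpty()), re-fetching the server list inside the condition on each poll.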