Use of org.apache.accumulo.core.client.Connector in project hive by apache.
The class TestAccumuloStorageHandler, method testNonExternalExistentTable:
@Test(expected = MetaException.class)
public void testNonExternalExistentTable() throws Exception {
  MockInstance inst = new MockInstance(test.getMethodName());
  Connector conn = inst.getConnector("root", new PasswordToken(""));
  String tableName = "table";
  // Create the table
  conn.tableOperations().create(tableName);
  // Define the SerDe Parameters
  Map<String, String> params = new HashMap<String, String>();
  params.put(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:cq");
  AccumuloConnectionParameters connectionParams = Mockito.mock(AccumuloConnectionParameters.class);
  AccumuloStorageHandler storageHandler = Mockito.mock(AccumuloStorageHandler.class);
  StorageDescriptor sd = Mockito.mock(StorageDescriptor.class);
  Table table = Mockito.mock(Table.class);
  SerDeInfo serDeInfo = Mockito.mock(SerDeInfo.class);
  // Call the real preCreateTable method
  Mockito.doCallRealMethod().when(storageHandler).preCreateTable(table);
  // Return our known table name
  Mockito.when(storageHandler.getTableName(table)).thenReturn(tableName);
  // Is not an EXTERNAL table
  Mockito.when(storageHandler.isExternalTable(table)).thenReturn(false);
  // Return the mocked StorageDescriptor
  Mockito.when(table.getSd()).thenReturn(sd);
  // No location expected with AccumuloStorageHandler
  Mockito.when(sd.getLocation()).thenReturn(null);
  // Return the mocked SerDeInfo
  Mockito.when(sd.getSerdeInfo()).thenReturn(serDeInfo);
  // Custom parameters
  Mockito.when(serDeInfo.getParameters()).thenReturn(params);
  // Return the MockInstance's Connector
  Mockito.when(connectionParams.getConnector()).thenReturn(conn);
  storageHandler.connectionParams = connectionParams;
  storageHandler.preCreateTable(table);
}
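MockInstance gives the test an in-memory Accumulo; against a real deployment a Connector would typically come from a ZooKeeperInstance instead. A minimal sketch, where the instance name, ZooKeeper quorum, and credentials are all placeholder assumptions:

import org.apache.accumulo.core.client.Connector;
import org.apache.accumulo.core.client.Instance;
import org.apache.accumulo.core.client.ZooKeeperInstance;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;

public class ConnectorSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical instance name and ZooKeeper quorum
    Instance instance = new ZooKeeperInstance("accumulo", "zkhost:2181");
    // Hypothetical credentials; the MockInstance above uses root with an empty password
    Connector conn = instance.getConnector("root", new PasswordToken("secret"));
    // List tables to confirm the connection works
    System.out.println(conn.tableOperations().list());
  }
}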
Use of org.apache.accumulo.core.client.Connector in project YCSB by brianfrankcooper.
The class AccumuloTest, method truncateTable:
@After
public void truncateTable() throws Exception {
  if (cluster != null) {
    LOG.debug("truncating table {}", CoreWorkload.TABLENAME_PROPERTY_DEFAULT);
    final Connector admin = cluster.getConnector("root", "protectyaneck");
    // null start and end rows make deleteRows remove every row in the table
    admin.tableOperations().deleteRows(CoreWorkload.TABLENAME_PROPERTY_DEFAULT, null, null);
  }
}
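Passing null start and end rows to deleteRows removes every row, which is why the test uses it as a cheap truncate between runs. A follow-up sketch (not in the YCSB code) that would confirm the table is empty, assuming the same admin Connector:

// sketch, not part of the YCSB test: confirm the truncate left no rows
Scanner scanner = admin.createScanner(CoreWorkload.TABLENAME_PROPERTY_DEFAULT, Authorizations.EMPTY);
assertFalse("table should be empty after deleteRows", scanner.iterator().hasNext());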
Use of org.apache.accumulo.core.client.Connector in project accumulo by apache.
The class SplitRecoveryIT, method test:
@Test
public void test() throws Exception {
  String tableName = getUniqueNames(1)[0];
  for (int tn = 0; tn < 2; tn++) {
    Connector connector = getConnector();
    // create a table and put some data in it
    connector.tableOperations().create(tableName);
    BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
    bw.addMutation(m("a"));
    bw.addMutation(m("b"));
    bw.addMutation(m("c"));
    bw.close();
    // take the table offline
    connector.tableOperations().offline(tableName);
    while (!isOffline(tableName, connector))
      sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
    // poke a partial split into the metadata table
    connector.securityOperations().grantTablePermission(getAdminPrincipal(), MetadataTable.NAME, TablePermission.WRITE);
    Table.ID tableId = Table.ID.of(connector.tableOperations().tableIdMap().get(tableName));
    KeyExtent extent = new KeyExtent(tableId, null, new Text("b"));
    Mutation m = extent.getPrevRowUpdateMutation();
    TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value(Double.toString(0.5).getBytes()));
    TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(null));
    bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
    bw.addMutation(m);
    if (tn == 1) {
      bw.flush();
      try (Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        scanner.setRange(extent.toMetadataRange());
        scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
        KeyExtent extent2 = new KeyExtent(tableId, new Text("b"), null);
        m = extent2.getPrevRowUpdateMutation();
        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t2".getBytes()));
        TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value("M0".getBytes()));
        for (Entry<Key, Value> entry : scanner) {
          m.put(DataFileColumnFamily.NAME, entry.getKey().getColumnQualifier(), entry.getValue());
        }
        bw.addMutation(m);
      }
    }
    bw.close();
    // bring the table online
    connector.tableOperations().online(tableName);
    // verify the tablets went online
    try (Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY)) {
      int i = 0;
      String[] expected = { "a", "b", "c" };
      for (Entry<Key, Value> entry : scanner) {
        assertEquals(expected[i], entry.getKey().getRow().toString());
        i++;
      }
      assertEquals(3, i);
      connector.tableOperations().delete(tableName);
    }
  }
}
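The m(String) helper called at the top of the loop is not shown in this excerpt. A plausible minimal reconstruction (hypothetical, not the project's actual code) writing one cell per row:

// hypothetical stand-in for the m(String) helper used above:
// one mutation per row, with a single placeholder cell
private static Mutation m(String row) {
  Mutation mut = new Mutation(new Text(row));
  mut.put(new Text("cf"), new Text("cq"), new Value("value".getBytes()));
  return mut;
}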
Use of org.apache.accumulo.core.client.Connector in project accumulo by apache.
The class TabletServerGivesUpIT, method test:
@Test(timeout = 45 * 1000)
public void test() throws Exception {
  final Connector conn = this.getConnector();
  // Yes, there's a tabletserver
  assertEquals(1, conn.instanceOperations().getTabletServers().size());
  final String tableName = getUniqueNames(1)[0];
  conn.tableOperations().create(tableName);
  // Kill dfs
  cluster.getMiniDfs().shutdown();
  // ask the tserver to do something
  final AtomicReference<Exception> ex = new AtomicReference<>();
  Thread splitter = new Thread() {
    @Override
    public void run() {
      try {
        TreeSet<Text> splits = new TreeSet<>();
        splits.add(new Text("X"));
        conn.tableOperations().addSplits(tableName, splits);
      } catch (Exception e) {
        ex.set(e);
      }
    }
  };
  splitter.start();
  // wait for the tserver to give up on writing to the WAL
  while (conn.instanceOperations().getTabletServers().size() == 1) {
    sleepUninterruptibly(1, TimeUnit.SECONDS);
  }
}
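The test ends as soon as the live tablet server count drops, leaving the splitter thread unexamined; if a suite wanted to reap it and inspect the captured failure, a possible follow-up (an assumption, not part of the original test) would be:

// sketch: bounded join on the splitter thread; addSplits may have failed
// once the tserver exited, in which case ex holds the exception
splitter.join(10 * 1000);
if (ex.get() != null) {
  ex.get().printStackTrace();
}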
Use of org.apache.accumulo.core.client.Connector in project accumulo by apache.
The class ChaoticBalancerIT, method test:
@Test
public void test() throws Exception {
  Connector c = getConnector();
  String[] names = getUniqueNames(1);
  String tableName = names[0];
  // A tiny split threshold and compressed block size make the table split
  // aggressively during ingest, giving the chaotic balancer tablets to move
  NewTableConfiguration ntc = new NewTableConfiguration();
  ntc.setProperties(Stream
      .of(new Pair<>(Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K"),
          new Pair<>(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1K"))
      .collect(Collectors.toMap(k -> k.getFirst(), v -> v.getSecond())));
  c.tableOperations().create(tableName, ntc);
  TestIngest.Opts opts = new TestIngest.Opts();
  VerifyIngest.Opts vopts = new VerifyIngest.Opts();
  vopts.rows = opts.rows = 20000;
  opts.setTableName(tableName);
  vopts.setTableName(tableName);
  // Use Kerberos credentials when SASL is enabled; otherwise use the admin principal
  ClientConfiguration clientConfig = getCluster().getClientConfig();
  if (clientConfig.hasSasl()) {
    opts.updateKerberosCredentials(clientConfig);
    vopts.updateKerberosCredentials(clientConfig);
  } else {
    opts.setPrincipal(getAdminPrincipal());
    vopts.setPrincipal(getAdminPrincipal());
  }
  // Ingest, flush, and verify that every row survived the balancer's churn
  TestIngest.ingest(c, opts, new BatchWriterOpts());
  c.tableOperations().flush(tableName, null, null, true);
  VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
}
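The Stream/Pair pipeline above just builds a two-entry property map; an equivalent, arguably plainer sketch of the same table configuration:

// sketch: the same NewTableConfiguration built with a plain map
Map<String, String> props = new HashMap<>();
props.put(Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
props.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1K");
NewTableConfiguration ntc = new NewTableConfiguration();
ntc.setProperties(props);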