Example 1 with Connector

Use of org.apache.accumulo.core.client.Connector in the apache/hive project.

From the class TestAccumuloStorageHandler, the method testNonExternalExistentTable:

@Test(expected = MetaException.class)
public void testNonExternalExistentTable() throws Exception {
    MockInstance inst = new MockInstance(test.getMethodName());
    Connector conn = inst.getConnector("root", new PasswordToken(""));
    String tableName = "table";
    // Create the table
    conn.tableOperations().create(tableName);
    // Define the SerDe Parameters
    Map<String, String> params = new HashMap<String, String>();
    params.put(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:cq");
    AccumuloConnectionParameters connectionParams = Mockito.mock(AccumuloConnectionParameters.class);
    AccumuloStorageHandler storageHandler = Mockito.mock(AccumuloStorageHandler.class);
    StorageDescriptor sd = Mockito.mock(StorageDescriptor.class);
    Table table = Mockito.mock(Table.class);
    SerDeInfo serDeInfo = Mockito.mock(SerDeInfo.class);
    // Call the real preCreateTable method
    Mockito.doCallRealMethod().when(storageHandler).preCreateTable(table);
    // Return our known table name
    Mockito.when(storageHandler.getTableName(table)).thenReturn(tableName);
    // Is not an EXTERNAL table
    Mockito.when(storageHandler.isExternalTable(table)).thenReturn(false);
    // Return the mocked StorageDescriptor
    Mockito.when(table.getSd()).thenReturn(sd);
    // No location expected with AccumuloStorageHandler
    Mockito.when(sd.getLocation()).thenReturn(null);
    // Return mocked SerDeInfo
    Mockito.when(sd.getSerdeInfo()).thenReturn(serDeInfo);
    // Custom parameters
    Mockito.when(serDeInfo.getParameters()).thenReturn(params);
    // Return the MockInstance's Connector
    Mockito.when(connectionParams.getConnector()).thenReturn(conn);
    storageHandler.connectionParams = connectionParams;
    storageHandler.preCreateTable(table);
}
Also used: Connector(org.apache.accumulo.core.client.Connector) PasswordToken(org.apache.accumulo.core.client.security.tokens.PasswordToken) Table(org.apache.hadoop.hive.metastore.api.Table) MockInstance(org.apache.accumulo.core.client.mock.MockInstance) HashMap(java.util.HashMap) SerDeInfo(org.apache.hadoop.hive.metastore.api.SerDeInfo) StorageDescriptor(org.apache.hadoop.hive.metastore.api.StorageDescriptor) Test(org.junit.Test)
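
As a side note, the guard this test expects to trip can be sketched as follows. This is a minimal illustration, not the actual AccumuloStorageHandler implementation; the instance name is arbitrary and the imports are the same ones listed above.

MockInstance inst = new MockInstance("testInstance");
Connector conn = inst.getConnector("root", new PasswordToken(""));
String tableName = "table";
// A managed (non-EXTERNAL) Hive table must not map onto an Accumulo
// table that already exists, so a handler would fail here:
if (conn.tableOperations().exists(tableName)) {
    throw new MetaException("Accumulo table " + tableName + " already exists");
}
conn.tableOperations().create(tableName);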

Example 2 with Connector

Use of org.apache.accumulo.core.client.Connector in the brianfrankcooper/YCSB project.

From the class AccumuloTest, the method truncateTable:

@After
public void truncateTable() throws Exception {
    if (cluster != null) {
        LOG.debug("truncating table {}", CoreWorkload.TABLENAME_PROPERTY_DEFAULT);
        final Connector admin = cluster.getConnector("root", "protectyaneck");
        admin.tableOperations().deleteRows(CoreWorkload.TABLENAME_PROPERTY_DEFAULT, null, null);
    }
}
Also used: Connector(org.apache.accumulo.core.client.Connector) After(org.junit.After)
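
deleteRows(table, start, end) removes the rows in the range (start, end]; passing null for both bounds clears the entire table without dropping it, which is what makes it a cheap truncate here. A minimal standalone sketch, with an illustrative table name and credentials:

Connector admin = cluster.getConnector("root", "secret");
// null start and end rows select every row in the table
admin.tableOperations().deleteRows("usertable", null, null);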

Example 3 with Connector

Use of org.apache.accumulo.core.client.Connector in the apache/accumulo project.

From the class SplitRecoveryIT, the method test:

@Test
public void test() throws Exception {
    String tableName = getUniqueNames(1)[0];
    for (int tn = 0; tn < 2; tn++) {
        Connector connector = getConnector();
        // create a table and put some data in it
        connector.tableOperations().create(tableName);
        BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
        bw.addMutation(m("a"));
        bw.addMutation(m("b"));
        bw.addMutation(m("c"));
        bw.close();
        // take the table offline
        connector.tableOperations().offline(tableName);
        while (!isOffline(tableName, connector)) {
            sleepUninterruptibly(200, TimeUnit.MILLISECONDS);
        }
        // poke a partial split into the metadata table
        connector.securityOperations().grantTablePermission(getAdminPrincipal(), MetadataTable.NAME, TablePermission.WRITE);
        Table.ID tableId = Table.ID.of(connector.tableOperations().tableIdMap().get(tableName));
        KeyExtent extent = new KeyExtent(tableId, null, new Text("b"));
        Mutation m = extent.getPrevRowUpdateMutation();
        TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.put(m, new Value(Double.toString(0.5).getBytes()));
        TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.put(m, KeyExtent.encodePrevEndRow(null));
        bw = connector.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
        bw.addMutation(m);
        if (tn == 1) {
            bw.flush();
            try (Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
                scanner.setRange(extent.toMetadataRange());
                scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
                KeyExtent extent2 = new KeyExtent(tableId, new Text("b"), null);
                m = extent2.getPrevRowUpdateMutation();
                TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t2".getBytes()));
                TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value("M0".getBytes()));
                for (Entry<Key, Value> entry : scanner) {
                    m.put(DataFileColumnFamily.NAME, entry.getKey().getColumnQualifier(), entry.getValue());
                }
                bw.addMutation(m);
            }
        }
        bw.close();
        // bring the table online
        connector.tableOperations().online(tableName);
        // verify the tablets went online
        try (Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY)) {
            int i = 0;
            String[] expected = { "a", "b", "c" };
            for (Entry<Key, Value> entry : scanner) {
                assertEquals(expected[i], entry.getKey().getRow().toString());
                i++;
            }
            assertEquals(3, i);
            connector.tableOperations().delete(tableName);
        }
    }
}
Also used: Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) Table(org.apache.accumulo.core.client.impl.Table) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) Text(org.apache.hadoop.io.Text) KeyExtent(org.apache.accumulo.core.data.impl.KeyExtent) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
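
The m(...) helper is defined elsewhere in SplitRecoveryIT and is not shown in the snippet. A plausible minimal version, with an illustrative column family, qualifier, and value, builds a single-cell Mutation keyed on the given row:

private static Mutation m(String row) {
    // One cell per row is enough for the split-recovery assertions above;
    // the actual helper in the test class may differ.
    Mutation mut = new Mutation(new Text(row));
    mut.put(new Text("cf"), new Text("cq"), new Value("value".getBytes()));
    return mut;
}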

Example 4 with Connector

Use of org.apache.accumulo.core.client.Connector in the apache/accumulo project.

From the class TabletServerGivesUpIT, the method test:

@Test(timeout = 45 * 1000)
public void test() throws Exception {
    final Connector conn = this.getConnector();
    // Yes, there's a tabletserver
    assertEquals(1, conn.instanceOperations().getTabletServers().size());
    final String tableName = getUniqueNames(1)[0];
    conn.tableOperations().create(tableName);
    // Kill dfs
    cluster.getMiniDfs().shutdown();
    // ask the tserver to do something
    final AtomicReference<Exception> ex = new AtomicReference<>();
    Thread splitter = new Thread() {

        @Override
        public void run() {
            try {
                TreeSet<Text> splits = new TreeSet<>();
                splits.add(new Text("X"));
                conn.tableOperations().addSplits(tableName, splits);
            } catch (Exception e) {
                ex.set(e);
            }
        }
    };
    splitter.start();
    // wait for the tserver to give up on writing to the WAL
    while (conn.instanceOperations().getTabletServers().size() == 1) {
        sleepUninterruptibly(1, TimeUnit.SECONDS);
    }
}
Also used: Connector(org.apache.accumulo.core.client.Connector) TreeSet(java.util.TreeSet) AtomicReference(java.util.concurrent.atomic.AtomicReference) Text(org.apache.hadoop.io.Text) Test(org.junit.Test)
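
The AtomicReference above is a general pattern for surfacing an exception from a worker thread back to the test thread, where it would otherwise be lost. A standalone sketch of the same pattern; riskyOperation() is a hypothetical stand-in for the addSplits call:

final AtomicReference<Exception> ex = new AtomicReference<>();
Thread worker = new Thread(() -> {
    try {
        riskyOperation(); // hypothetical; stands in for conn.tableOperations().addSplits(...)
    } catch (Exception e) {
        ex.set(e); // record the failure for the main thread
    }
});
worker.start();
worker.join();
if (ex.get() != null) {
    throw ex.get(); // rethrow on the test thread so the test fails visibly
}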

Example 5 with Connector

Use of org.apache.accumulo.core.client.Connector in the apache/accumulo project.

From the class ChaoticBalancerIT, the method test:

@Test
public void test() throws Exception {
    Connector c = getConnector();
    String[] names = getUniqueNames(1);
    String tableName = names[0];
    NewTableConfiguration ntc = new NewTableConfiguration();
    ntc.setProperties(Stream.of(
            new Pair<>(Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K"),
            new Pair<>(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1K"))
        .collect(Collectors.toMap(k -> k.getFirst(), v -> v.getSecond())));
    c.tableOperations().create(tableName, ntc);
    TestIngest.Opts opts = new TestIngest.Opts();
    VerifyIngest.Opts vopts = new VerifyIngest.Opts();
    vopts.rows = opts.rows = 20000;
    opts.setTableName(tableName);
    vopts.setTableName(tableName);
    ClientConfiguration clientConfig = getCluster().getClientConfig();
    if (clientConfig.hasSasl()) {
        opts.updateKerberosCredentials(clientConfig);
        vopts.updateKerberosCredentials(clientConfig);
    } else {
        opts.setPrincipal(getAdminPrincipal());
        vopts.setPrincipal(getAdminPrincipal());
    }
    TestIngest.ingest(c, opts, new BatchWriterOpts());
    c.tableOperations().flush(tableName, null, null, true);
    VerifyIngest.verifyIngest(c, vopts, new ScannerOpts());
}
Also used: Connector(org.apache.accumulo.core.client.Connector) BatchWriterOpts(org.apache.accumulo.core.cli.BatchWriterOpts) ScannerOpts(org.apache.accumulo.core.cli.ScannerOpts) TestIngest(org.apache.accumulo.test.TestIngest) VerifyIngest(org.apache.accumulo.test.VerifyIngest) NewTableConfiguration(org.apache.accumulo.core.client.admin.NewTableConfiguration) ClientConfiguration(org.apache.accumulo.core.client.ClientConfiguration) Test(org.junit.Test)
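
The Stream/Pair pipeline above only builds a two-entry property map; an equivalent and arguably clearer way to express the same table configuration uses a plain HashMap (same imports as the test, plus java.util.HashMap and java.util.Map):

// Equivalent to the Stream.of(...).collect(Collectors.toMap(...)) call:
// set per-table properties at creation time via NewTableConfiguration.
Map<String, String> props = new HashMap<>();
props.put(Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K");
props.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "1K");
NewTableConfiguration ntc = new NewTableConfiguration();
ntc.setProperties(props);
c.tableOperations().create(tableName, ntc);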

Aggregations

Connector (org.apache.accumulo.core.client.Connector): 622 usages
Test (org.junit.Test): 415 usages
BatchWriter (org.apache.accumulo.core.client.BatchWriter): 171 usages
Value (org.apache.accumulo.core.data.Value): 162 usages
Text (org.apache.hadoop.io.Text): 160 usages
Scanner (org.apache.accumulo.core.client.Scanner): 158 usages
Mutation (org.apache.accumulo.core.data.Mutation): 152 usages
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig): 143 usages
Key (org.apache.accumulo.core.data.Key): 139 usages
PasswordToken (org.apache.accumulo.core.client.security.tokens.PasswordToken): 101 usages
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException): 87 usages
AccumuloException (org.apache.accumulo.core.client.AccumuloException): 83 usages
IteratorSetting (org.apache.accumulo.core.client.IteratorSetting): 75 usages
Range (org.apache.accumulo.core.data.Range): 74 usages
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException): 65 usages
Authorizations (org.apache.accumulo.core.security.Authorizations): 60 usages
HashSet (java.util.HashSet): 57 usages
Instance (org.apache.accumulo.core.client.Instance): 55 usages
ArrayList (java.util.ArrayList): 53 usages
Entry (java.util.Map.Entry): 53 usages