Example 46 with Table

use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

the class SequentialWorkAssignerTest method basicZooKeeperCleanup.

@Test
public void basicZooKeeperCleanup() throws Exception {
    DistributedWorkQueue workQueue = createMock(DistributedWorkQueue.class);
    ZooCache zooCache = createMock(ZooCache.class);
    Instance inst = createMock(Instance.class);
    Map<String, Map<Table.ID, String>> queuedWork = new TreeMap<>();
    Map<Table.ID, String> cluster1Work = new TreeMap<>();
    // Two files for cluster1 that we have assigned work for: one for table '1' and another for table '2'
    cluster1Work.put(Table.ID.of("1"), DistributedWorkQueueWorkAssignerHelper.getQueueKey("file1", new ReplicationTarget("cluster1", "1", Table.ID.of("1"))));
    cluster1Work.put(Table.ID.of("2"), DistributedWorkQueueWorkAssignerHelper.getQueueKey("file2", new ReplicationTarget("cluster1", "2", Table.ID.of("2"))));
    queuedWork.put("cluster1", cluster1Work);
    assigner.setConnector(conn);
    assigner.setZooCache(zooCache);
    assigner.setWorkQueue(workQueue);
    assigner.setQueuedWork(queuedWork);
    expect(conn.getInstance()).andReturn(inst);
    expect(inst.getInstanceID()).andReturn("instance");
    // file1 replicated
    expect(zooCache.get(ZooUtil.getRoot("instance") + ReplicationConstants.ZOO_WORK_QUEUE + "/" + DistributedWorkQueueWorkAssignerHelper.getQueueKey("file1", new ReplicationTarget("cluster1", "1", Table.ID.of("1"))))).andReturn(null);
    // file2 still needs to replicate
    expect(zooCache.get(ZooUtil.getRoot("instance") + ReplicationConstants.ZOO_WORK_QUEUE + "/" + DistributedWorkQueueWorkAssignerHelper.getQueueKey("file2", new ReplicationTarget("cluster1", "2", Table.ID.of("2"))))).andReturn(new byte[0]);
    replay(workQueue, zooCache, conn, inst);
    assigner.cleanupFinishedWork();
    verify(workQueue, zooCache, conn, inst);
    Assert.assertEquals(1, cluster1Work.size());
    Assert.assertEquals(DistributedWorkQueueWorkAssignerHelper.getQueueKey("file2", new ReplicationTarget("cluster1", "2", Table.ID.of("2"))), cluster1Work.get(Table.ID.of("2")));
}
Also used : Table(org.apache.accumulo.core.client.impl.Table) ReplicationTarget(org.apache.accumulo.core.replication.ReplicationTarget) Instance(org.apache.accumulo.core.client.Instance) TreeMap(java.util.TreeMap) ZooCache(org.apache.accumulo.server.zookeeper.ZooCache) Map(java.util.Map) DistributedWorkQueue(org.apache.accumulo.server.zookeeper.DistributedWorkQueue) Test(org.junit.Test)
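
The test is built on EasyMock's record/replay/verify cycle: expectations are recorded with expect(...).andReturn(...), replay(...) arms the mocks, and verify(...) fails the test if any recorded call never happened. A self-contained sketch of that cycle, with a hypothetical KeyValueStore interface standing in for ZooCache (nothing below is Accumulo code):

import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

import org.junit.Assert;
import org.junit.Test;

public class RecordReplayVerifyExample {

    // Hypothetical collaborator standing in for ZooCache in the test above.
    public interface KeyValueStore {
        byte[] get(String path);
    }

    @Test
    public void recordReplayVerify() {
        KeyValueStore store = createMock(KeyValueStore.class);
        // Record phase: a null return models "the work node is gone",
        // the same way the assigner test marks file1 as replicated.
        expect(store.get("/work/file1")).andReturn(null);
        // Arm the mock; any unexpected call from here on fails the test.
        replay(store);
        Assert.assertNull(store.get("/work/file1"));
        // Fails if a recorded expectation was never exercised.
        verify(store);
    }
}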

Example 47 with Table

use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

the class ImportTable method call.

@Override
public Repo<Master> call(long tid, Master env) throws Exception {
    checkVersions(env);
    // The first step is to reserve a table id. If the machine fails during this
    // step it is OK to retry; the only side effect is that a table id may be
    // skipped and never used. This assumes only the master process is creating
    // tables.
    Utils.idLock.lock();
    try {
        Instance instance = env.getInstance();
        tableInfo.tableId = Utils.getNextId(tableInfo.tableName, instance, Table.ID::of);
        return new ImportSetupPermissions(tableInfo);
    } finally {
        Utils.idLock.unlock();
    }
}
Also used : Table(org.apache.accumulo.core.client.impl.Table) Instance(org.apache.accumulo.core.client.Instance)
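
The only subtlety here is the locking discipline: the id is advanced under Utils.idLock, and because no other table state exists yet, a crash inside this step just wastes a number before the FATE framework retries. A hypothetical allocator sketching the same lock-then-allocate pattern (only Utils.getNextId and Table.ID::of are real Accumulo names; the rest is illustrative):

import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;

// Hypothetical allocator mirroring the lock-then-allocate discipline of
// ImportTable.call() above.
public class IdAllocator<T> {
    private final ReentrantLock idLock = new ReentrantLock();
    private final AtomicLong counter = new AtomicLong();

    public T nextId(Function<String, T> wrapper) {
        idLock.lock();
        try {
            // A crash after incrementing merely skips a number; no table
            // state exists yet, so the FATE step can safely run again.
            return wrapper.apply(Long.toString(counter.incrementAndGet(), 36));
        } finally {
            idLock.unlock();
        }
    }
}

Callers produce typed ids the same way the FATE step does, e.g. new IdAllocator<Table.ID>().nextId(Table.ID::of).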

Example 48 with Table

use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

the class TablesResource method getParticipatingTabletServers.

/**
 * Generates a list of participating tservers for a table
 *
 * @param tableIdStr
 *          Table ID to find participating tservers
 * @return List of participating tservers
 */
@Path("{tableId}")
@GET
public TabletServers getParticipatingTabletServers(@PathParam("tableId") @NotNull @Pattern(regexp = ALPHA_NUM_REGEX_TABLE_ID) String tableIdStr) {
    Instance instance = Monitor.getContext().getInstance();
    Table.ID tableId = Table.ID.of(tableIdStr);
    TabletServers tabletServers = new TabletServers(Monitor.getMmi().tServerInfo.size());
    if (StringUtils.isBlank(tableIdStr)) {
        return tabletServers;
    }
    TreeSet<String> locs = new TreeSet<>();
    if (RootTable.ID.equals(tableId)) {
        locs.add(instance.getRootTabletLocation());
    } else {
        String systemTableName = MetadataTable.ID.equals(tableId) ? RootTable.NAME : MetadataTable.NAME;
        MetaDataTableScanner scanner = new MetaDataTableScanner(Monitor.getContext(), new Range(KeyExtent.getMetadataEntry(tableId, new Text()), KeyExtent.getMetadataEntry(tableId, null)), systemTableName);
        while (scanner.hasNext()) {
            TabletLocationState state = scanner.next();
            if (state.current != null) {
                try {
                    locs.add(state.current.hostPort());
                } catch (Exception ex) {
                    scanner.close();
                    return tabletServers;
                }
            }
        }
        scanner.close();
    }
    List<TabletServerStatus> tservers = new ArrayList<>();
    if (Monitor.getMmi() != null) {
        for (TabletServerStatus tss : Monitor.getMmi().tServerInfo) {
            try {
                if (tss.name != null && locs.contains(tss.name))
                    tservers.add(tss);
            } catch (Exception ex) {
                return tabletServers;
            }
        }
    }
    // Build the per-tserver response entries from the matching tservers
    for (TabletServerStatus status : tservers) {
        if (status == null)
            status = NO_STATUS;
        TableInfo summary = TableInfoUtil.summarizeTableStats(status);
        if (tableId != null)
            summary = status.tableMap.get(tableId.canonicalID());
        if (summary == null)
            continue;
        TabletServer tabletServerInfo = new TabletServer();
        tabletServerInfo.updateTabletServerInfo(status, summary);
        tabletServers.addTablet(tabletServerInfo);
    }
    return tabletServers;
}
Also used : MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) RootTable(org.apache.accumulo.core.metadata.RootTable) Table(org.apache.accumulo.core.client.impl.Table) Instance(org.apache.accumulo.core.client.Instance) HdfsZooInstance(org.apache.accumulo.server.client.HdfsZooInstance) ArrayList(java.util.ArrayList) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) TabletServers(org.apache.accumulo.monitor.rest.tservers.TabletServers) TreeSet(java.util.TreeSet) MetaDataTableScanner(org.apache.accumulo.server.master.state.MetaDataTableScanner) TabletServer(org.apache.accumulo.monitor.rest.tservers.TabletServer) TabletLocationState(org.apache.accumulo.server.master.state.TabletLocationState) TableInfo(org.apache.accumulo.core.master.thrift.TableInfo) TabletServerStatus(org.apache.accumulo.core.master.thrift.TabletServerStatus) Path(javax.ws.rs.Path) GET(javax.ws.rs.GET)
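
The resource turns the raw path parameter into a typed Table.ID up front, then drops back to the raw string via canonicalID() when indexing the Thrift tableMap (which is keyed by plain strings). A minimal sketch of that round trip; the map contents are invented for illustration:

import java.util.HashMap;
import java.util.Map;

import org.apache.accumulo.core.client.impl.Table;

public class TableIdRoundTrip {
    public static void main(String[] args) {
        // Table.ID wraps the raw string id; canonicalID() hands it back.
        Table.ID id = Table.ID.of("2a");
        // Thrift structures such as TabletServerStatus.tableMap are keyed
        // by the raw string, so lookups go back through canonicalID().
        Map<String, String> tableMap = new HashMap<>();
        tableMap.put("2a", "summary-for-2a"); // hypothetical payload
        System.out.println(tableMap.get(id.canonicalID())); // summary-for-2a
    }
}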

Example 49 with Table

use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

the class TableOperation method execute.

@Override
public int execute(final String fullCommand, final CommandLine cl, final Shell shellState) throws Exception {
    // populate tableSet with the tables to operate on
    final SortedSet<String> tableSet = new TreeSet<>();
    if (cl.hasOption(optTablePattern.getOpt())) {
        String tablePattern = cl.getOptionValue(optTablePattern.getOpt());
        for (String table : shellState.getConnector().tableOperations().list()) {
            if (table.matches(tablePattern)) {
                tableSet.add(table);
            }
        }
        pruneTables(tablePattern, tableSet);
    } else if (cl.hasOption(optTableName.getOpt())) {
        tableSet.add(cl.getOptionValue(optTableName.getOpt()));
    } else if (cl.hasOption(optNamespace.getOpt())) {
        Instance instance = shellState.getInstance();
        Namespace.ID namespaceId = Namespaces.getNamespaceId(instance, cl.getOptionValue(optNamespace.getOpt()));
        for (Table.ID tableId : Namespaces.getTableIds(instance, namespaceId)) {
            tableSet.add(Tables.getTableName(instance, tableId));
        }
    } else if (useCommandLine && cl.getArgs().length > 0) {
        for (String tableName : cl.getArgs()) {
            tableSet.add(tableName);
        }
    } else {
        shellState.checkTableState();
        tableSet.add(shellState.getTableName());
    }
    if (tableSet.isEmpty())
        Shell.log.warn("No tables found that match your criteria");
    boolean more = true;
    // apply the operation to each selected table
    for (String tableName : tableSet) {
        if (!more) {
            break;
        }
        if (!shellState.getConnector().tableOperations().exists(tableName)) {
            throw new TableNotFoundException(null, tableName, null);
        }
        boolean operate = true;
        if (!force) {
            shellState.getReader().flush();
            String line = shellState.getReader().readLine(getName() + " { " + tableName + " } (yes|no)? ");
            more = line != null;
            operate = line != null && (line.equalsIgnoreCase("y") || line.equalsIgnoreCase("yes"));
        }
        if (operate) {
            doTableOp(shellState, tableName);
        }
    }
    return 0;
}
Also used : TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) Table(org.apache.accumulo.core.client.impl.Table) Instance(org.apache.accumulo.core.client.Instance) TreeSet(java.util.TreeSet) Namespace(org.apache.accumulo.core.client.impl.Namespace)
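
When force is not set, the command prompts per table and treats EOF (readLine returning null) as a signal to stop asking for the remaining tables. A standalone sketch of that prompt loop, using a plain BufferedReader and made-up table names in place of the shell's reader and table set:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Arrays;
import java.util.List;

public class ConfirmLoop {
    public static void main(String[] args) throws IOException {
        // Hypothetical table names; the shell builds this set as shown above.
        List<String> tables = Arrays.asList("trace", "accounts");
        BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
        boolean more = true;
        for (String table : tables) {
            if (!more)
                break;
            System.out.print("flush { " + table + " } (yes|no)? ");
            String line = in.readLine();
            // EOF (null) stops prompting entirely, matching the shell command.
            more = line != null;
            if (line != null && (line.equalsIgnoreCase("y") || line.equalsIgnoreCase("yes"))) {
                System.out.println("would operate on " + table);
            }
        }
    }
}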

Example 50 with Table

use of org.apache.accumulo.core.client.impl.Table in project accumulo by apache.

the class FileArchiveIT method testUnusuedFilesAreArchived.

@Test
public void testUnusuedFilesAreArchived() throws Exception {
    final Connector conn = getConnector();
    final String tableName = getUniqueNames(1)[0];
    conn.tableOperations().create(tableName);
    final Table.ID tableId = Table.ID.of(conn.tableOperations().tableIdMap().get(tableName));
    Assert.assertNotNull("Could not get table ID", tableId);
    BatchWriter bw = conn.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation m = new Mutation("row");
    m.put("", "", "value");
    bw.addMutation(m);
    bw.close();
    // Compact memory to disk
    conn.tableOperations().compact(tableName, null, null, true, true);
    try (Scanner s = conn.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
        s.setRange(MetadataSchema.TabletsSection.getRange(tableId));
        s.fetchColumnFamily(MetadataSchema.TabletsSection.DataFileColumnFamily.NAME);
        Entry<Key, Value> entry = Iterables.getOnlyElement(s);
        final String file = entry.getKey().getColumnQualifier().toString();
        final Path p = new Path(file);
        // Then force another to make an unreferenced file
        conn.tableOperations().compact(tableName, null, null, true, true);
        log.info("File for table: {}", file);
        FileSystem fs = getCluster().getFileSystem();
        int i = 0;
        while (fs.exists(p)) {
            i++;
            Thread.sleep(1000);
            if (0 == i % 10) {
                log.info("Waited {} iterations, file still exists", i);
            }
        }
        log.info("File was removed");
        String filePath = p.toUri().getPath().substring(getCluster().getConfig().getAccumuloDir().toString().length());
        log.info("File relative to accumulo dir: {}", filePath);
        Path fileArchiveDir = new Path(getCluster().getConfig().getAccumuloDir().toString(), ServerConstants.FILE_ARCHIVE_DIR);
        Assert.assertTrue("File archive directory didn't exist", fs.exists(fileArchiveDir));
        // Remove the leading '/' to make sure Path treats the 2nd arg as a child.
        Path archivedFile = new Path(fileArchiveDir, filePath.substring(1));
        Assert.assertTrue("File doesn't exists in archive directory: " + archivedFile, fs.exists(archivedFile));
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Connector(org.apache.accumulo.core.client.Connector) Scanner(org.apache.accumulo.core.client.Scanner) Table(org.apache.accumulo.core.client.impl.Table) MetadataTable(org.apache.accumulo.core.metadata.MetadataTable) FileSystem(org.apache.hadoop.fs.FileSystem) Value(org.apache.accumulo.core.data.Value) BatchWriterConfig(org.apache.accumulo.core.client.BatchWriterConfig) BatchWriter(org.apache.accumulo.core.client.BatchWriter) Mutation(org.apache.accumulo.core.data.Mutation) Key(org.apache.accumulo.core.data.Key) Test(org.junit.Test)
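
The wait loop above polls fs.exists(p) once a second with no upper bound, so a hung garbage collector would hang the test too. A timeout-guarded variant of the same poll, sketched against the Hadoop FileSystem API (this helper and its timeout are assumptions, not part of the test):

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WaitForRemoval {
    /**
     * Polls once a second until the path disappears or the timeout elapses;
     * returns true if the file was removed in time.
     */
    public static boolean waitForRemoval(FileSystem fs, Path p, long timeoutMillis)
            throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (fs.exists(p)) {
            if (System.currentTimeMillis() > deadline) {
                return false;
            }
            Thread.sleep(1000);
        }
        return true;
    }
}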

Aggregations

Table (org.apache.accumulo.core.client.impl.Table) 55
MetadataTable (org.apache.accumulo.core.metadata.MetadataTable) 34
Value (org.apache.accumulo.core.data.Value) 27
Key (org.apache.accumulo.core.data.Key) 25
Text (org.apache.hadoop.io.Text) 25
Scanner (org.apache.accumulo.core.client.Scanner) 21
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent) 21
Test (org.junit.Test) 21
Connector (org.apache.accumulo.core.client.Connector) 19
Mutation (org.apache.accumulo.core.data.Mutation) 18
ReplicationTable (org.apache.accumulo.core.replication.ReplicationTable) 18
BatchWriter (org.apache.accumulo.core.client.BatchWriter) 17
TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException) 17
AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException) 15
ArrayList (java.util.ArrayList) 14
AccumuloException (org.apache.accumulo.core.client.AccumuloException) 14
Path (org.apache.hadoop.fs.Path) 14
HashSet (java.util.HashSet) 11
BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig) 11
HashMap (java.util.HashMap) 9