
Example 66 with TServerInstance

Use of org.apache.accumulo.server.master.state.TServerInstance in project accumulo by apache.

From the class NullTserver, method main:

public static void main(String[] args) throws Exception {
    Opts opts = new Opts();
    opts.parseArgs(NullTserver.class.getName(), args);
    // modify metadata
    ZooKeeperInstance zki = new ZooKeeperInstance(ClientConfiguration.create().withInstance(opts.iname).withZkHosts(opts.keepers));
    Instance inst = HdfsZooInstance.getInstance();
    AccumuloServerContext context = new AccumuloServerContext(inst, new ServerConfigurationFactory(zki));
    TransactionWatcher watcher = new TransactionWatcher();
    ThriftClientHandler tch = new ThriftClientHandler(new AccumuloServerContext(inst, new ServerConfigurationFactory(inst)), watcher);
    Processor<Iface> processor = new Processor<>(tch);
    TServerUtils.startTServer(context.getConfiguration(), ThriftServerType.CUSTOM_HS_HA, processor, "NullTServer", "null tserver", 2, 1, 1000, 10 * 1024 * 1024, null, null, -1, HostAndPort.fromParts("0.0.0.0", opts.port));
    HostAndPort addr = HostAndPort.fromParts(InetAddress.getLocalHost().getHostName(), opts.port);
    Table.ID tableId = Tables.getTableId(zki, opts.tableName);
    // read the locations for the table
    Range tableRange = new KeyExtent(tableId, null, null).toMetadataRange();
    List<Assignment> assignments = new ArrayList<>();
    try (MetaDataTableScanner s = new MetaDataTableScanner(context, tableRange)) {
        long randomSessionID = opts.port;
        TServerInstance instance = new TServerInstance(addr, randomSessionID);
        while (s.hasNext()) {
            TabletLocationState next = s.next();
            assignments.add(new Assignment(next.extent, instance));
        }
    }
    // point them to this server
    MetaDataStateStore store = new MetaDataStateStore(context);
    store.setLocations(assignments);
    while (true) {
        sleepUninterruptibly(10, TimeUnit.SECONDS);
    }
}
Also used: AccumuloServerContext (org.apache.accumulo.server.AccumuloServerContext), Processor (org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Processor), Table (org.apache.accumulo.core.client.impl.Table), Instance (org.apache.accumulo.core.client.Instance), TServerInstance (org.apache.accumulo.server.master.state.TServerInstance), ZooKeeperInstance (org.apache.accumulo.core.client.ZooKeeperInstance), HdfsZooInstance (org.apache.accumulo.server.client.HdfsZooInstance), ArrayList (java.util.ArrayList), ServerConfigurationFactory (org.apache.accumulo.server.conf.ServerConfigurationFactory), TRowRange (org.apache.accumulo.core.data.thrift.TRowRange), TRange (org.apache.accumulo.core.data.thrift.TRange), Range (org.apache.accumulo.core.data.Range), TKeyExtent (org.apache.accumulo.core.data.thrift.TKeyExtent), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), Assignment (org.apache.accumulo.server.master.state.Assignment), MetaDataStateStore (org.apache.accumulo.server.master.state.MetaDataStateStore), HostAndPort (org.apache.accumulo.core.util.HostAndPort), Iface (org.apache.accumulo.core.tabletserver.thrift.TabletClientService.Iface), TransactionWatcher (org.apache.accumulo.server.zookeeper.TransactionWatcher), MetaDataTableScanner (org.apache.accumulo.server.master.state.MetaDataTableScanner), TabletLocationState (org.apache.accumulo.server.master.state.TabletLocationState)
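For reference, here is a minimal standalone sketch of the same assignment pattern (not taken from the Accumulo sources; the class and method names are hypothetical). It reuses only constructors and fields that appear in the example above: TServerInstance(HostAndPort, long), KeyExtent with null end row and previous end row to cover a whole table, and Assignment(KeyExtent, TServerInstance).

import org.apache.accumulo.core.client.impl.Table;
import org.apache.accumulo.core.data.impl.KeyExtent;
import org.apache.accumulo.core.util.HostAndPort;
import org.apache.accumulo.server.master.state.Assignment;
import org.apache.accumulo.server.master.state.TServerInstance;

public class AssignmentSketch {

    // Builds an Assignment that points the whole range of the given table at a single
    // tablet server, mirroring the loop in NullTserver.main above. Host, port, and
    // session id are supplied by the caller; any long works as a session id here.
    static Assignment assignWholeTable(Table.ID tableId, String host, int port, long sessionId) {
        // A TServerInstance identifies a tablet server by its address plus a session id.
        TServerInstance instance = new TServerInstance(HostAndPort.fromParts(host, port), sessionId);
        // A KeyExtent with null end row and null previous end row spans the entire table.
        KeyExtent wholeTable = new KeyExtent(tableId, null, null);
        return new Assignment(wholeTable, instance);
    }
}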

Example 67 with TServerInstance

Use of org.apache.accumulo.server.master.state.TServerInstance in project accumulo by apache.

From the class SplitRecoveryIT, method splitPartiallyAndRecover:

private void splitPartiallyAndRecover(AccumuloServerContext context, KeyExtent extent, KeyExtent high, KeyExtent low, double splitRatio, SortedMap<FileRef, DataFileValue> mapFiles, Text midRow, String location, int steps, ZooLock zl) throws Exception {
    SortedMap<FileRef, DataFileValue> lowDatafileSizes = new TreeMap<>();
    SortedMap<FileRef, DataFileValue> highDatafileSizes = new TreeMap<>();
    List<FileRef> highDatafilesToRemove = new ArrayList<>();
    MetadataTableUtil.splitDatafiles(midRow, splitRatio, new HashMap<>(), mapFiles, lowDatafileSizes, highDatafileSizes, highDatafilesToRemove);
    MetadataTableUtil.splitTablet(high, extent.getPrevEndRow(), splitRatio, context, zl);
    TServerInstance instance = new TServerInstance(location, zl.getSessionId());
    Writer writer = MetadataTableUtil.getMetadataTable(context);
    Assignment assignment = new Assignment(high, instance);
    Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
    assignment.server.putFutureLocation(m);
    writer.update(m);
    if (steps >= 1) {
        Map<Long, ? extends Collection<FileRef>> bulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, extent);
        MasterMetadataUtil.addNewTablet(context, low, "/lowDir", instance, lowDatafileSizes, bulkFiles, TabletTime.LOGICAL_TIME_ID + "0", -1l, -1l, zl);
    }
    if (steps >= 2) {
        MetadataTableUtil.finishSplit(high, highDatafileSizes, highDatafilesToRemove, context, zl);
    }
    TabletServer.verifyTabletInformation(context, high, instance, new TreeMap<>(), "127.0.0.1:0", zl);
    if (steps >= 1) {
        ensureTabletHasNoUnexpectedMetadataEntries(context, low, lowDatafileSizes);
        ensureTabletHasNoUnexpectedMetadataEntries(context, high, highDatafileSizes);
        Map<Long, ? extends Collection<FileRef>> lowBulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, low);
        Map<Long, ? extends Collection<FileRef>> highBulkFiles = MetadataTableUtil.getBulkFilesLoaded(context, high);
        if (!lowBulkFiles.equals(highBulkFiles)) {
            throw new Exception(" " + lowBulkFiles + " != " + highBulkFiles + " " + low + " " + high);
        }
        if (lowBulkFiles.size() == 0) {
            throw new Exception(" no bulk files " + low);
        }
    } else {
        ensureTabletHasNoUnexpectedMetadataEntries(context, extent, mapFiles);
    }
}
Also used: DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), ArrayList (java.util.ArrayList), TreeMap (java.util.TreeMap), TServerInstance (org.apache.accumulo.server.master.state.TServerInstance), Assignment (org.apache.accumulo.server.master.state.Assignment), FileRef (org.apache.accumulo.server.fs.FileRef), Mutation (org.apache.accumulo.core.data.Mutation), ZooReaderWriter (org.apache.accumulo.server.zookeeper.ZooReaderWriter), Writer (org.apache.accumulo.core.client.impl.Writer), IZooReaderWriter (org.apache.accumulo.fate.zookeeper.IZooReaderWriter)
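As a companion sketch for the future-location step above (again with hypothetical class, method, and parameter names, not from the Accumulo sources), the following isolates the mutation that marks a tablet as about to be hosted by a server, using only the calls that appear in splitPartiallyAndRecover:

import org.apache.accumulo.core.client.impl.Writer;
import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.impl.KeyExtent;
import org.apache.accumulo.server.master.state.Assignment;
import org.apache.accumulo.server.master.state.TServerInstance;

public class FutureLocationSketch {

    // Writes a "future" location entry for a tablet: the mutation records that the tablet
    // is about to be hosted by the given server, as done before finishSplit() in the test.
    // location is a "host:port" string; sessionId would normally come from a ZooLock.
    static void putFutureLocation(Writer metadataWriter, KeyExtent extent,
            String location, long sessionId) throws Exception {
        TServerInstance instance = new TServerInstance(location, sessionId);
        Assignment assignment = new Assignment(extent, instance);

        // The mutation targets the tablet's metadata row; putFutureLocation adds the
        // future-location column for this server, and the Writer applies it.
        Mutation m = new Mutation(assignment.tablet.getMetadataEntry());
        assignment.server.putFutureLocation(m);
        metadataWriter.update(m);
    }
}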

Aggregations

TServerInstance (org.apache.accumulo.server.master.state.TServerInstance): 67
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 35
HashMap (java.util.HashMap): 22
Test (org.junit.Test): 22
ArrayList (java.util.ArrayList): 21
TabletServerStatus (org.apache.accumulo.core.master.thrift.TabletServerStatus): 14
HashSet (java.util.HashSet): 10
Value (org.apache.accumulo.core.data.Value): 10
AccumuloServerContext (org.apache.accumulo.server.AccumuloServerContext): 10
TServerConnection (org.apache.accumulo.server.master.LiveTServerSet.TServerConnection): 9
TabletMigration (org.apache.accumulo.server.master.state.TabletMigration): 9
TreeMap (java.util.TreeMap): 8
Instance (org.apache.accumulo.core.client.Instance): 8
Table (org.apache.accumulo.core.client.impl.Table): 8
TKeyExtent (org.apache.accumulo.core.data.thrift.TKeyExtent): 8
UUID (java.util.UUID): 7
Key (org.apache.accumulo.core.data.Key): 7
Text (org.apache.hadoop.io.Text): 7
TException (org.apache.thrift.TException): 7
List (java.util.List): 6