Example 1 with KeyExtent

Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.

The class BulkImport, method load().

@Override
public void load() throws TableNotFoundException, IOException, AccumuloException, AccumuloSecurityException {
    TableId tableId = context.getTableId(tableName);
    FileSystem fs = VolumeConfiguration.fileSystemForPath(dir, context.getHadoopConf());
    Path srcPath = checkPath(fs, dir);
    SortedMap<KeyExtent, Bulk.Files> mappings;
    TableOperationsImpl tableOps = new TableOperationsImpl(context);
    int maxTablets = 0;
    for (var prop : tableOps.getProperties(tableName)) {
        if (prop.getKey().equals(Property.TABLE_BULK_MAX_TABLETS.getKey())) {
            maxTablets = Integer.parseInt(prop.getValue());
            break;
        }
    }
    Retry retry = Retry.builder().infiniteRetries()
        .retryAfter(100, MILLISECONDS)
        .incrementBy(100, MILLISECONDS)
        .maxWait(2, MINUTES)
        .backOffFactor(1.5)
        .logInterval(3, MINUTES)
        .createRetry();
    // retry if a merge occurs
    boolean shouldRetry = true;
    while (shouldRetry) {
        if (plan == null) {
            mappings = computeMappingFromFiles(fs, tableId, srcPath, maxTablets);
        } else {
            mappings = computeMappingFromPlan(fs, tableId, srcPath, maxTablets);
        }
        if (mappings.isEmpty()) {
            if (ignoreEmptyDir) {
                log.info("Attempted to import files from empty directory - {}. Zero files imported", srcPath);
                return;
            } else {
                throw new IllegalArgumentException("Attempted to import zero files from " + srcPath);
            }
        }
        BulkSerialize.writeLoadMapping(mappings, srcPath.toString(), fs::create);
        List<ByteBuffer> args = Arrays.asList(
            ByteBuffer.wrap(tableId.canonical().getBytes(UTF_8)),
            ByteBuffer.wrap(srcPath.toString().getBytes(UTF_8)),
            ByteBuffer.wrap((setTime + "").getBytes(UTF_8)));
        try {
            tableOps.doBulkFateOperation(args, tableName);
            shouldRetry = false;
        } catch (AccumuloBulkMergeException ae) {
            if (plan != null) {
                checkPlanForSplits(ae);
            }
            try {
                retry.waitForNextAttempt();
            } catch (InterruptedException e) {
                throw new RuntimeException(e);
            }
            log.info(ae.getMessage() + ". Retrying bulk import to " + tableName);
        }
    }
}
Also used : TableId(org.apache.accumulo.core.data.TableId) Path(org.apache.hadoop.fs.Path) AccumuloBulkMergeException(org.apache.accumulo.core.clientImpl.AccumuloBulkMergeException) TableOperationsImpl(org.apache.accumulo.core.clientImpl.TableOperationsImpl) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) ByteBuffer(java.nio.ByteBuffer) FileSystem(org.apache.hadoop.fs.FileSystem) Retry(org.apache.accumulo.fate.util.Retry) Files(org.apache.accumulo.core.clientImpl.bulk.Bulk.Files)
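
A quick usage sketch (not part of the source above): load() is reached through the public bulk-import builder on TableOperations. The client-properties path, directory, and table name below are hypothetical.

import org.apache.accumulo.core.client.Accumulo;
import org.apache.accumulo.core.client.AccumuloClient;

public class BulkLoadSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical connection details; substitute your own.
        try (AccumuloClient client = Accumulo.newClient().from("client.properties").build()) {
            client.tableOperations().importDirectory("hdfs://namenode/tmp/bulk-files")
                // to(...) picks the destination table; load() drives the method above
                .to("mytable")
                // tableTime(true) corresponds to the setTime flag serialized into the fate args
                .tableTime(true)
                .load();
        }
    }
}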

Example 2 with KeyExtent

Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.

The class BulkImport, method estimateSizes().

public static Map<KeyExtent, Long> estimateSizes(AccumuloConfiguration acuConf, Path mapFile, long fileSize, Collection<KeyExtent> extents, FileSystem ns, Cache<String, Long> fileLenCache, CryptoService cs) throws IOException {
    if (extents.size() == 1) {
        return Collections.singletonMap(extents.iterator().next(), fileSize);
    }
    long totalIndexEntries = 0;
    Map<KeyExtent, MLong> counts = new TreeMap<>();
    for (KeyExtent keyExtent : extents) counts.put(keyExtent, new MLong(0));
    Text row = new Text();
    FileSKVIterator index = FileOperations.getInstance().newIndexReaderBuilder()
        .forFile(mapFile.toString(), ns, ns.getConf(), cs)
        .withTableConfiguration(acuConf)
        .withFileLenCache(fileLenCache)
        .build();
    try {
        while (index.hasTop()) {
            Key key = index.getTopKey();
            totalIndexEntries++;
            key.getRow(row);
            // TODO this could use a binary search
            for (Entry<KeyExtent, MLong> entry : counts.entrySet()) {
                if (entry.getKey().contains(row)) {
                    entry.getValue().l++;
                }
            }
            index.next();
        }
    } finally {
        try {
            if (index != null)
                index.close();
        } catch (IOException e) {
            log.debug("Failed to close " + mapFile, e);
        }
    }
    Map<KeyExtent, Long> results = new TreeMap<>();
    for (KeyExtent keyExtent : extents) {
        double numEntries = counts.get(keyExtent).l;
        if (numEntries == 0)
            numEntries = 1;
        long estSize = (long) ((numEntries / totalIndexEntries) * fileSize);
        results.put(keyExtent, estSize);
    }
    return results;
}
Also used : FileSKVIterator(org.apache.accumulo.core.file.FileSKVIterator) Text(org.apache.hadoop.io.Text) IOException(java.io.IOException) TreeMap(java.util.TreeMap) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) Key(org.apache.accumulo.core.data.Key)
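
The method above splits fileSize across tablets in proportion to how many index entries fall inside each extent, bumping a zero count to one so every overlapping tablet gets a nonzero estimate. A minimal, self-contained sketch of that arithmetic with hypothetical numbers:

public class EstimateSketch {
    public static void main(String[] args) {
        long fileSize = 10_000_000L;  // total rfile size in bytes (hypothetical)
        long totalIndexEntries = 50;  // index keys seen across the whole file
        long entriesInTablet = 20;    // index keys whose row fell inside one extent
        // A zero count is bumped to 1, mirroring the guard in estimateSizes.
        double numEntries = Math.max(entriesInTablet, 1);
        long estSize = (long) ((numEntries / totalIndexEntries) * fileSize);
        System.out.println(estSize);  // prints 4000000
    }
}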

Example 3 with KeyExtent

Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.

The class BulkImport, method computeFileToTabletMappings().

public SortedMap<KeyExtent, Bulk.Files> computeFileToTabletMappings(FileSystem fs, TableId tableId, Path dirPath, Executor executor, ClientContext context, int maxTablets) throws IOException {
    KeyExtentCache extentCache = new ConcurrentKeyExtentCache(tableId, context);
    List<FileStatus> files = filterInvalid(fs.listStatus(dirPath, p -> !p.getName().equals(Constants.BULK_LOAD_MAPPING)));
    // we know all of the file lens, so construct a cache and populate it in order to avoid later
    // trips to the namenode
    Cache<String, Long> fileLensCache = getPopulatedFileLenCache(dirPath, files);
    List<CompletableFuture<Map<KeyExtent, Bulk.FileInfo>>> futures = new ArrayList<>();
    CryptoService cs = CryptoServiceFactory.newDefaultInstance();
    for (FileStatus fileStatus : files) {
        Path filePath = fileStatus.getPath();
        CompletableFuture<Map<KeyExtent, Bulk.FileInfo>> future = CompletableFuture.supplyAsync(() -> {
            try {
                long t1 = System.currentTimeMillis();
                List<KeyExtent> extents = findOverlappingTablets(context, extentCache, filePath, fs, fileLensCache, cs);
                // make sure file isn't going to too many tablets
                checkTabletCount(maxTablets, extents.size(), filePath.toString());
                Map<KeyExtent, Long> estSizes = estimateSizes(context.getConfiguration(), filePath, fileStatus.getLen(), extents, fs, fileLensCache, cs);
                Map<KeyExtent, Bulk.FileInfo> pathLocations = new HashMap<>();
                for (KeyExtent ke : extents) {
                    pathLocations.put(ke, new Bulk.FileInfo(filePath, estSizes.getOrDefault(ke, 0L)));
                }
                long t2 = System.currentTimeMillis();
                log.debug("Mapped {} to {} tablets in {}ms", filePath, pathLocations.size(), t2 - t1);
                return pathLocations;
            } catch (Exception e) {
                throw new CompletionException(e);
            }
        }, executor);
        futures.add(future);
    }
    SortedMap<KeyExtent, Bulk.Files> mappings = new TreeMap<>();
    for (CompletableFuture<Map<KeyExtent, Bulk.FileInfo>> future : futures) {
        try {
            Map<KeyExtent, Bulk.FileInfo> pathMapping = future.get();
            pathMapping.forEach((ext, fi) -> mappings.computeIfAbsent(ext, k -> new Files()).add(fi));
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        } catch (ExecutionException e) {
            throw new RuntimeException(e);
        }
    }
    return mergeOverlapping(mappings);
}
Also used : TableId(org.apache.accumulo.core.data.TableId) ByteSequence(org.apache.accumulo.core.data.ByteSequence) Arrays(java.util.Arrays) FileSystem(org.apache.hadoop.fs.FileSystem) LoggerFactory(org.slf4j.LoggerFactory) Text(org.apache.hadoop.io.Text) FileStatus(org.apache.hadoop.fs.FileStatus) ByteBuffer(java.nio.ByteBuffer) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) CachableBlockFile.pathToCacheId(org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile.pathToCacheId) ConfigurationTypeHelper(org.apache.accumulo.core.conf.ConfigurationTypeHelper) FileOperations(org.apache.accumulo.core.file.FileOperations) AccumuloBulkMergeException(org.apache.accumulo.core.clientImpl.AccumuloBulkMergeException) TableOperationsImpl(org.apache.accumulo.core.clientImpl.TableOperationsImpl) Map(java.util.Map) Path(org.apache.hadoop.fs.Path) FileInfo(org.apache.accumulo.core.clientImpl.bulk.Bulk.FileInfo) Property(org.apache.accumulo.core.conf.Property) LoadPlan(org.apache.accumulo.core.data.LoadPlan) ClientContext(org.apache.accumulo.core.clientImpl.ClientContext) Collection(java.util.Collection) FileSKVIterator(org.apache.accumulo.core.file.FileSKVIterator) Set(java.util.Set) CompletionException(java.util.concurrent.CompletionException) ThreadPools(org.apache.accumulo.core.util.threads.ThreadPools) MILLISECONDS(java.util.concurrent.TimeUnit.MILLISECONDS) RangeType(org.apache.accumulo.core.data.LoadPlan.RangeType) FileNotFoundException(java.io.FileNotFoundException) Sets(com.google.common.collect.Sets) VolumeConfiguration(org.apache.accumulo.core.volume.VolumeConfiguration) Objects(java.util.Objects) List(java.util.List) Stream(java.util.stream.Stream) Entry(java.util.Map.Entry) Files(org.apache.accumulo.core.clientImpl.bulk.Bulk.Files) ImportDestinationArguments(org.apache.accumulo.core.client.admin.TableOperations.ImportDestinationArguments) ImportMappingOptions(org.apache.accumulo.core.client.admin.TableOperations.ImportMappingOptions) CacheBuilder(com.google.common.cache.CacheBuilder) SortedMap(java.util.SortedMap) FilenameUtils(org.apache.commons.io.FilenameUtils) Collectors.groupingBy(java.util.stream.Collectors.groupingBy) Destination(org.apache.accumulo.core.data.LoadPlan.Destination) MINUTES(java.util.concurrent.TimeUnit.MINUTES) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) Key(org.apache.accumulo.core.data.Key) ExecutorService(java.util.concurrent.ExecutorService) EXISTING_TABLE_NAME(org.apache.accumulo.core.util.Validators.EXISTING_TABLE_NAME) Retry(org.apache.accumulo.fate.util.Retry) Logger(org.slf4j.Logger) CryptoService(org.apache.accumulo.core.spi.crypto.CryptoService) Executor(java.util.concurrent.Executor) UTF_8(java.nio.charset.StandardCharsets.UTF_8) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) IOException(java.io.IOException) Constants(org.apache.accumulo.core.Constants) CryptoServiceFactory(org.apache.accumulo.core.crypto.CryptoServiceFactory) AccumuloException(org.apache.accumulo.core.client.AccumuloException) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration) Range(org.apache.accumulo.core.data.Range) ExecutionException(java.util.concurrent.ExecutionException) TreeMap(java.util.TreeMap) Preconditions(com.google.common.base.Preconditions) Cache(com.google.common.cache.Cache) Collections(java.util.Collections) 
ClientProperty(org.apache.accumulo.core.conf.ClientProperty)
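
A hypothetical wiring of the method above. BulkImport lives in clientImpl, so this is illustrative rather than a supported entry point; bulkImport, fs, tableId, and context are assumed to already exist (Executors and ExecutorService come from java.util.concurrent), and the maxTablets of 0 matches the starting value in Example 1 before the TABLE_BULK_MAX_TABLETS property is read.

ExecutorService executor = Executors.newFixedThreadPool(8);
try {
    SortedMap<KeyExtent, Bulk.Files> mappings = bulkImport.computeFileToTabletMappings(
        fs, tableId, new Path("/tmp/bulk-files"), executor, context, 0);
    // Each entry maps a tablet to the set of files (with estimated sizes) it must load.
    mappings.forEach((extent, files) -> System.out.println(extent + " <- " + files));
} finally {
    executor.shutdown();
}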

Example 4 with KeyExtent

Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.

The class BulkImport, method findOverlappingTablets().

public static List<KeyExtent> findOverlappingTablets(KeyExtentCache extentCache, FileSKVIterator reader) throws IOException {
    List<KeyExtent> result = new ArrayList<>();
    Collection<ByteSequence> columnFamilies = Collections.emptyList();
    Text row = new Text();
    while (true) {
        reader.seek(new Range(row, null), columnFamilies, false);
        if (!reader.hasTop()) {
            break;
        }
        row = reader.getTopKey().getRow();
        KeyExtent extent = extentCache.lookup(row);
        result.add(extent);
        row = extent.endRow();
        if (row != null) {
            row = nextRow(row);
        } else {
            break;
        }
    }
    return result;
}
Also used : ArrayList(java.util.ArrayList) Text(org.apache.hadoop.io.Text) Range(org.apache.accumulo.core.data.Range) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) ByteSequence(org.apache.accumulo.core.data.ByteSequence)
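
The loop above depends on a nextRow helper to seek just past each tablet's end row. A minimal sketch of the usual trick, assuming the helper follows the standard pattern: appending a single zero byte yields the smallest row strictly greater than the input.

private static final byte[] ZERO_BYTE = {0};

static Text nextRow(Text row) {
    // row + 0x00 is the immediate successor of row in sorted order, so the
    // next seek starts at the first possible row after the tablet just processed.
    Text next = new Text(row);
    next.append(ZERO_BYTE, 0, 1);
    return next;
}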

Example 5 with KeyExtent

Use of org.apache.accumulo.core.dataImpl.KeyExtent in project accumulo by apache.

The class TabletServerBatchReaderIterator, method doLookup().

static void doLookup(ClientContext context, String server, Map<KeyExtent, List<Range>> requested, Map<KeyExtent, List<Range>> failures, Map<KeyExtent, List<Range>> unscanned, ResultReceiver receiver, List<Column> columns, ScannerOptions options, Authorizations authorizations, TimeoutTracker timeoutTracker) throws IOException, AccumuloSecurityException, AccumuloServerException {
    if (requested.isEmpty()) {
        return;
    }
    // Copy requested into the unscanned map; ranges are removed as they are scanned in trackScanning().
    for (Entry<KeyExtent, List<Range>> entry : requested.entrySet()) {
        ArrayList<Range> ranges = new ArrayList<>();
        for (Range range : entry.getValue()) {
            ranges.add(new Range(range));
        }
        unscanned.put(KeyExtent.copyOf(entry.getKey()), ranges);
    }
    timeoutTracker.startingScan();
    try {
        final HostAndPort parsedServer = HostAndPort.fromString(server);
        final TabletClientService.Client client;
        if (timeoutTracker.getTimeOut() < context.getClientTimeoutInMillis())
            client = ThriftUtil.getTServerClient(parsedServer, context, timeoutTracker.getTimeOut());
        else
            client = ThriftUtil.getTServerClient(parsedServer, context);
        try {
            OpTimer timer = null;
            if (log.isTraceEnabled()) {
                log.trace("tid={} Starting multi scan, tserver={}  #tablets={}  #ranges={} ssil={} ssio={}", Thread.currentThread().getId(), server, requested.size(), sumSizes(requested.values()), options.serverSideIteratorList, options.serverSideIteratorOptions);
                timer = new OpTimer().start();
            }
            TabletType ttype = TabletType.type(requested.keySet());
            boolean waitForWrites = !ThriftScanner.serversWaitedForWrites.get(ttype).contains(server);
            // @formatter:off
            Map<TKeyExtent, List<TRange>> thriftTabletRanges = requested.entrySet().stream().collect(Collectors.toMap((entry) -> entry.getKey().toThrift(), (entry) -> entry.getValue().stream().map(Range::toThrift).collect(Collectors.toList())));
            // @formatter:on
            Map<String, String> execHints = options.executionHints.isEmpty() ? null : options.executionHints;
            InitialMultiScan imsr = client.startMultiScan(TraceUtil.traceInfo(), context.rpcCreds(), thriftTabletRanges, columns.stream().map(Column::toThrift).collect(Collectors.toList()), options.serverSideIteratorList, options.serverSideIteratorOptions, ByteBufferUtil.toByteBuffers(authorizations.getAuthorizations()), waitForWrites, SamplerConfigurationImpl.toThrift(options.getSamplerConfiguration()), options.batchTimeOut, options.classLoaderContext, execHints);
            if (waitForWrites)
                ThriftScanner.serversWaitedForWrites.get(ttype).add(server);
            MultiScanResult scanResult = imsr.result;
            if (timer != null) {
                timer.stop();
                log.trace("tid={} Got 1st multi scan results, #results={} {} in {}", Thread.currentThread().getId(), scanResult.results.size(), (scanResult.more ? "scanID=" + imsr.scanID : ""), String.format("%.3f secs", timer.scale(SECONDS)));
            }
            ArrayList<Entry<Key, Value>> entries = new ArrayList<>(scanResult.results.size());
            for (TKeyValue kv : scanResult.results) {
                entries.add(new SimpleImmutableEntry<>(new Key(kv.key), new Value(kv.value)));
            }
            if (!entries.isEmpty())
                receiver.receive(entries);
            if (!entries.isEmpty() || !scanResult.fullScans.isEmpty())
                timeoutTracker.madeProgress();
            trackScanning(failures, unscanned, scanResult);
            AtomicLong nextOpid = new AtomicLong();
            while (scanResult.more) {
                timeoutTracker.check();
                if (timer != null) {
                    log.trace("tid={} oid={} Continuing multi scan, scanid={}", Thread.currentThread().getId(), nextOpid.get(), imsr.scanID);
                    timer.reset().start();
                }
                scanResult = client.continueMultiScan(TraceUtil.traceInfo(), imsr.scanID);
                if (timer != null) {
                    timer.stop();
                    log.trace("tid={} oid={} Got more multi scan results, #results={} {} in {}", Thread.currentThread().getId(), nextOpid.getAndIncrement(), scanResult.results.size(), (scanResult.more ? " scanID=" + imsr.scanID : ""), String.format("%.3f secs", timer.scale(SECONDS)));
                }
                entries = new ArrayList<>(scanResult.results.size());
                for (TKeyValue kv : scanResult.results) {
                    entries.add(new SimpleImmutableEntry<>(new Key(kv.key), new Value(kv.value)));
                }
                if (!entries.isEmpty())
                    receiver.receive(entries);
                if (!entries.isEmpty() || !scanResult.fullScans.isEmpty())
                    timeoutTracker.madeProgress();
                trackScanning(failures, unscanned, scanResult);
            }
            client.closeMultiScan(TraceUtil.traceInfo(), imsr.scanID);
        } finally {
            ThriftUtil.returnClient(client, context);
        }
    } catch (TTransportException e) {
        log.debug("Server : {} msg : {}", server, e.getMessage());
        timeoutTracker.errorOccured();
        throw new IOException(e);
    } catch (ThriftSecurityException e) {
        log.debug("Server : {} msg : {}", server, e.getMessage(), e);
        throw new AccumuloSecurityException(e.user, e.code, e);
    } catch (TApplicationException e) {
        log.debug("Server : {} msg : {}", server, e.getMessage(), e);
        throw new AccumuloServerException(server, e);
    } catch (NoSuchScanIDException e) {
        log.debug("Server : {} msg : {}", server, e.getMessage(), e);
        throw new IOException(e);
    } catch (TSampleNotPresentException e) {
        log.debug("Server : " + server + " msg : " + e.getMessage(), e);
        String tableInfo = "?";
        if (e.getExtent() != null) {
            TableId tableId = KeyExtent.fromThrift(e.getExtent()).tableId();
            tableInfo = context.getPrintableTableInfoFromId(tableId);
        }
        String message = "Table " + tableInfo + " does not have sampling configured or built";
        throw new SampleNotPresentException(message, e);
    } catch (TException e) {
        log.debug("Server : {} msg : {}", server, e.getMessage(), e);
        timeoutTracker.errorOccured();
        throw new IOException(e);
    }
}
Also used : TableId(org.apache.accumulo.core.data.TableId) ThriftUtil(org.apache.accumulo.core.rpc.ThriftUtil) ListIterator(java.util.ListIterator) TTransportException(org.apache.thrift.transport.TTransportException) MultiScanResult(org.apache.accumulo.core.dataImpl.thrift.MultiScanResult) LoggerFactory(org.slf4j.LoggerFactory) TKeyValue(org.apache.accumulo.core.dataImpl.thrift.TKeyValue) TableNotFoundException(org.apache.accumulo.core.client.TableNotFoundException) Map(java.util.Map) TabletClientService(org.apache.accumulo.core.tabletserver.thrift.TabletClientService) Value(org.apache.accumulo.core.data.Value) NoSuchScanIDException(org.apache.accumulo.core.tabletserver.thrift.NoSuchScanIDException) Column(org.apache.accumulo.core.data.Column) Collection(java.util.Collection) Set(java.util.Set) ThriftSecurityException(org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException) Collectors(java.util.stream.Collectors) ArrayBlockingQueue(java.util.concurrent.ArrayBlockingQueue) List(java.util.List) Entry(java.util.Map.Entry) TRange(org.apache.accumulo.core.dataImpl.thrift.TRange) TraceUtil(org.apache.accumulo.core.trace.TraceUtil) ByteBufferUtil(org.apache.accumulo.core.util.ByteBufferUtil) TimedOutException(org.apache.accumulo.core.client.TimedOutException) SamplerConfigurationImpl(org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl) HostAndPort(org.apache.accumulo.core.util.HostAndPort) HashMap(java.util.HashMap) SimpleImmutableEntry(java.util.AbstractMap.SimpleImmutableEntry) TableDeletedException(org.apache.accumulo.core.client.TableDeletedException) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) TSampleNotPresentException(org.apache.accumulo.core.tabletserver.thrift.TSampleNotPresentException) AccumuloSecurityException(org.apache.accumulo.core.client.AccumuloSecurityException) Key(org.apache.accumulo.core.data.Key) InitialMultiScan(org.apache.accumulo.core.dataImpl.thrift.InitialMultiScan) NoSuchElementException(java.util.NoSuchElementException) TApplicationException(org.apache.thrift.TApplicationException) ExecutorService(java.util.concurrent.ExecutorService) TKeyExtent(org.apache.accumulo.core.dataImpl.thrift.TKeyExtent) Logger(org.slf4j.Logger) Iterator(java.util.Iterator) Semaphore(java.util.concurrent.Semaphore) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) TException(org.apache.thrift.TException) IOException(java.io.IOException) SampleNotPresentException(org.apache.accumulo.core.client.SampleNotPresentException) Authorizations(org.apache.accumulo.core.security.Authorizations) AccumuloException(org.apache.accumulo.core.client.AccumuloException) Range(org.apache.accumulo.core.data.Range) AtomicLong(java.util.concurrent.atomic.AtomicLong) OpTimer(org.apache.accumulo.core.util.OpTimer) Collections(java.util.Collections) SECONDS(java.util.concurrent.TimeUnit.SECONDS) TableId(org.apache.accumulo.core.data.TableId) TException(org.apache.thrift.TException) ArrayList(java.util.ArrayList) TTransportException(org.apache.thrift.transport.TTransportException) TKeyExtent(org.apache.accumulo.core.dataImpl.thrift.TKeyExtent) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) HostAndPort(org.apache.accumulo.core.util.HostAndPort) Entry(java.util.Map.Entry) SimpleImmutableEntry(java.util.AbstractMap.SimpleImmutableEntry) MultiScanResult(org.apache.accumulo.core.dataImpl.thrift.MultiScanResult) Column(org.apache.accumulo.core.data.Column) List(java.util.List) ArrayList(java.util.ArrayList) 
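
A hypothetical call setup for doLookup (the method is package-private, so imagine this from code in the same package): requested holds the ranges to scan keyed by tablet, while failures and unscanned start empty and are filled in by trackScanning as the multi-scan progresses. context, extent, options, and timeoutTracker are assumed to exist; ResultReceiver has a single receive method, so a lambda works here.

Map<KeyExtent, List<Range>> requested = new HashMap<>();
requested.put(extent, new ArrayList<>(List.of(new Range("a", "m"))));
Map<KeyExtent, List<Range>> failures = new HashMap<>();
Map<KeyExtent, List<Range>> unscanned = new HashMap<>();
// Hypothetical tserver address; receiver just prints each key it gets.
doLookup(context, "tserver1.example.com:9997", requested, failures, unscanned,
    entries -> entries.forEach(e -> System.out.println(e.getKey())),
    Collections.emptyList(), options, Authorizations.EMPTY, timeoutTracker);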

Aggregations

KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent): 239 usages
Text (org.apache.hadoop.io.Text): 98 usages
ArrayList (java.util.ArrayList): 72 usages
HashMap (java.util.HashMap): 60 usages
Value (org.apache.accumulo.core.data.Value): 57 usages
Key (org.apache.accumulo.core.data.Key): 56 usages
TableId (org.apache.accumulo.core.data.TableId): 53 usages
Test (org.junit.Test): 52 usages
Mutation (org.apache.accumulo.core.data.Mutation): 47 usages
IOException (java.io.IOException): 40 usages
List (java.util.List): 40 usages
TKeyExtent (org.apache.accumulo.core.dataImpl.thrift.TKeyExtent): 39 usages
HashSet (java.util.HashSet): 38 usages
TreeMap (java.util.TreeMap): 38 usages
Range (org.apache.accumulo.core.data.Range): 38 usages
Map (java.util.Map): 33 usages
Scanner (org.apache.accumulo.core.client.Scanner): 31 usages
Entry (java.util.Map.Entry): 30 usages
AccumuloClient (org.apache.accumulo.core.client.AccumuloClient): 30 usages
Test (org.junit.jupiter.api.Test): 30 usages
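
For reference, a minimal sketch of constructing the KeyExtent these examples revolve around: a tablet spanning (prevEndRow, endRow], with null marking an infinite bound. The table id and rows below are hypothetical.

TableId tableId = TableId.of("1");
// Tablet covering everything up to and including row "m" (prevEndRow of null = -infinity).
KeyExtent extent = new KeyExtent(tableId, new Text("m"), null);
boolean covers = extent.contains(new Text("g"));  // true: "g" <= "m"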