
Example 1 with TabletFile

Use of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.

From the class TabletMetadata, the method convertRow:

@VisibleForTesting
public static TabletMetadata convertRow(Iterator<Entry<Key, Value>> rowIter, EnumSet<ColumnType> fetchedColumns, boolean buildKeyValueMap) {
    Objects.requireNonNull(rowIter);
    TabletMetadata te = new TabletMetadata();
    final ImmutableSortedMap.Builder<Key, Value> kvBuilder = buildKeyValueMap ? ImmutableSortedMap.naturalOrder() : null;
    final var filesBuilder = ImmutableMap.<StoredTabletFile, DataFileValue>builder();
    final var scansBuilder = ImmutableList.<StoredTabletFile>builder();
    final var logsBuilder = ImmutableList.<LogEntry>builder();
    final var extCompBuilder = ImmutableMap.<ExternalCompactionId, ExternalCompactionMetadata>builder();
    final var loadedFilesBuilder = ImmutableMap.<TabletFile, Long>builder();
    ByteSequence row = null;
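    // walk the entries of the single metadata row, dispatching on column family and qualifier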
    while (rowIter.hasNext()) {
        final Entry<Key, Value> kv = rowIter.next();
        final Key key = kv.getKey();
        final String val = kv.getValue().toString();
        final String fam = key.getColumnFamilyData().toString();
        final String qual = key.getColumnQualifierData().toString();
        if (buildKeyValueMap) {
            kvBuilder.put(key, kv.getValue());
        }
        if (row == null) {
            row = key.getRowData();
            KeyExtent ke = KeyExtent.fromMetaRow(key.getRow());
            te.endRow = ke.endRow();
            te.tableId = ke.tableId();
        } else if (!row.equals(key.getRowData())) {
            throw new IllegalArgumentException("Input contains more than one row : " + row + " " + key.getRowData());
        }
        switch (fam) {
            case TabletColumnFamily.STR_NAME:
                switch(qual) {
                    case PREV_ROW_QUAL:
                        te.prevEndRow = TabletColumnFamily.decodePrevEndRow(kv.getValue());
                        te.sawPrevEndRow = true;
                        break;
                    case OLD_PREV_ROW_QUAL:
                        te.oldPrevEndRow = TabletColumnFamily.decodePrevEndRow(kv.getValue());
                        te.sawOldPrevEndRow = true;
                        break;
                    case SPLIT_RATIO_QUAL:
                        te.splitRatio = Double.parseDouble(val);
                        break;
                }
                break;
            case ServerColumnFamily.STR_NAME:
                switch(qual) {
                    case DIRECTORY_QUAL:
                        Preconditions.checkArgument(ServerColumnFamily.isValidDirCol(val), "Saw invalid dir name %s %s", key, val);
                        te.dirName = val;
                        break;
                    case TIME_QUAL:
                        te.time = MetadataTime.parse(val);
                        break;
                    case FLUSH_QUAL:
                        te.flush = OptionalLong.of(Long.parseLong(val));
                        break;
                    case COMPACT_QUAL:
                        te.compact = OptionalLong.of(Long.parseLong(val));
                        break;
                }
                break;
            case DataFileColumnFamily.STR_NAME:
                filesBuilder.put(new StoredTabletFile(qual), new DataFileValue(val));
                break;
            case BulkFileColumnFamily.STR_NAME:
                loadedFilesBuilder.put(new StoredTabletFile(qual), BulkFileColumnFamily.getBulkLoadTid(val));
                break;
            case CurrentLocationColumnFamily.STR_NAME:
                te.setLocationOnce(val, qual, LocationType.CURRENT);
                break;
            case FutureLocationColumnFamily.STR_NAME:
                te.setLocationOnce(val, qual, LocationType.FUTURE);
                break;
            case LastLocationColumnFamily.STR_NAME:
                te.last = new Location(val, qual, LocationType.LAST);
                break;
            case SuspendLocationColumn.STR_NAME:
                te.suspend = SuspendingTServer.fromValue(kv.getValue());
                break;
            case ScanFileColumnFamily.STR_NAME:
                scansBuilder.add(new StoredTabletFile(qual));
                break;
            case ClonedColumnFamily.STR_NAME:
                te.cloned = val;
                break;
            case LogColumnFamily.STR_NAME:
                logsBuilder.add(LogEntry.fromMetaWalEntry(kv));
                break;
            case ExternalCompactionColumnFamily.STR_NAME:
                extCompBuilder.put(ExternalCompactionId.of(qual), ExternalCompactionMetadata.fromJson(val));
                break;
            default:
                throw new IllegalStateException("Unexpected family " + fam);
        }
    }
    te.files = filesBuilder.build();
    te.loadedFiles = loadedFilesBuilder.build();
    te.fetchedCols = fetchedColumns;
    te.scans = scansBuilder.build();
    te.logs = logsBuilder.build();
    te.extCompactions = extCompBuilder.build();
    if (buildKeyValueMap) {
        te.keyValues = kvBuilder.build();
    }
    return te;
}
Also used : ImmutableSortedMap(com.google.common.collect.ImmutableSortedMap) KeyExtent(org.apache.accumulo.core.dataImpl.KeyExtent) Value(org.apache.accumulo.core.data.Value) OptionalLong(java.util.OptionalLong) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile) TabletFile(org.apache.accumulo.core.metadata.TabletFile) Key(org.apache.accumulo.core.data.Key) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) ByteSequence(org.apache.accumulo.core.data.ByteSequence) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
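
A minimal sketch of how convertRow could be driven from a scan of the metadata table. The helper class, the use of MetadataTable.NAME with empty authorizations, and the ColumnType import location are assumptions for illustration; convertRow itself is marked @VisibleForTesting, so production code would normally go through a higher-level metadata reader instead.

import java.util.EnumSet;

import org.apache.accumulo.core.client.AccumuloClient;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.schema.TabletMetadata;
import org.apache.accumulo.core.metadata.schema.TabletMetadata.ColumnType;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.hadoop.io.Text;

class ConvertRowSketch {
    // Hypothetical helper: read the entries of one tablet's metadata row and convert them.
    static TabletMetadata readTabletRow(AccumuloClient client, Text metaRow) throws Exception {
        try (Scanner scanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
            // restrict the scan to the single row for this tablet
            scanner.setRange(new Range(metaRow));
            // ask for every column type and keep the raw key/value map as well
            return TabletMetadata.convertRow(scanner.iterator(), EnumSet.allOf(ColumnType.class), true);
        }
    }
}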

Example 2 with TabletFile

Use of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.

From the class OfflineIterator, the method createIterator:

private SortedKeyValueIterator<Key, Value> createIterator(KeyExtent extent, Collection<StoredTabletFile> absFiles) throws TableNotFoundException, AccumuloException, IOException {
    // possible race condition here, if table is renamed
    String tableName = context.getTableName(tableId);
    AccumuloConfiguration acuTableConf = new ConfigurationCopy(context.tableOperations().getConfiguration(tableName));
    Configuration conf = context.getHadoopConf();
    for (SortedKeyValueIterator<Key, Value> reader : readers) {
        ((FileSKVIterator) reader).close();
    }
    readers.clear();
    SamplerConfiguration scannerSamplerConfig = options.getSamplerConfiguration();
    SamplerConfigurationImpl scannerSamplerConfigImpl = scannerSamplerConfig == null ? null : new SamplerConfigurationImpl(scannerSamplerConfig);
    SamplerConfigurationImpl samplerConfImpl = SamplerConfigurationImpl.newSamplerConfig(acuTableConf);
    if (scannerSamplerConfigImpl != null && !scannerSamplerConfigImpl.equals(samplerConfImpl)) {
        throw new SampleNotPresentException();
    }
    for (TabletFile file : absFiles) {
        FileSystem fs = VolumeConfiguration.fileSystemForPath(file.getPathStr(), conf);
        FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder().forFile(file.getPathStr(), fs, conf, CryptoServiceFactory.newDefaultInstance()).withTableConfiguration(acuTableConf).build();
        if (scannerSamplerConfigImpl != null) {
            reader = reader.getSample(scannerSamplerConfigImpl);
            if (reader == null)
                throw new SampleNotPresentException();
        }
        readers.add(reader);
    }
    MultiIterator multiIter = new MultiIterator(readers, extent);
    OfflineIteratorEnvironment iterEnv = new OfflineIteratorEnvironment(authorizations, acuTableConf, false, samplerConfImpl == null ? null : samplerConfImpl.toSamplerConfiguration());
    byte[] defaultSecurityLabel;
    ColumnVisibility cv = new ColumnVisibility(acuTableConf.get(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY));
    defaultSecurityLabel = cv.getExpression();
    SortedKeyValueIterator<Key, Value> visFilter = SystemIteratorUtil.setupSystemScanIterators(multiIter, new HashSet<>(options.fetchedColumns), authorizations, defaultSecurityLabel, acuTableConf);
    IterLoad iterLoad = IterConfigUtil.loadIterConf(IteratorScope.scan, options.serverSideIteratorList, options.serverSideIteratorOptions, acuTableConf);
    return iterEnv.getTopLevelIterator(IterConfigUtil.loadIterators(visFilter, iterLoad.iterEnv(iterEnv).useAccumuloClassLoader(false)));
}
Also used : FileSKVIterator(org.apache.accumulo.core.file.FileSKVIterator) ConfigurationCopy(org.apache.accumulo.core.conf.ConfigurationCopy) MultiIterator(org.apache.accumulo.core.iteratorsImpl.system.MultiIterator) Configuration(org.apache.hadoop.conf.Configuration) SamplerConfiguration(org.apache.accumulo.core.client.sample.SamplerConfiguration) VolumeConfiguration(org.apache.accumulo.core.volume.VolumeConfiguration) AccumuloConfiguration(org.apache.accumulo.core.conf.AccumuloConfiguration) SamplerConfigurationImpl(org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl) SampleNotPresentException(org.apache.accumulo.core.client.SampleNotPresentException) IterLoad(org.apache.accumulo.core.conf.IterLoad) FileSystem(org.apache.hadoop.fs.FileSystem) KeyValue(org.apache.accumulo.core.data.KeyValue) Value(org.apache.accumulo.core.data.Value) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile) TabletFile(org.apache.accumulo.core.metadata.TabletFile) ColumnVisibility(org.apache.accumulo.core.security.ColumnVisibility) Key(org.apache.accumulo.core.data.Key) PartialKey(org.apache.accumulo.core.data.PartialKey)
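
A minimal sketch isolating the sampling branch above. The class and method names are hypothetical; it assumes a FileSKVIterator already opened through the same reader builder and a client-supplied SamplerConfiguration.

import org.apache.accumulo.core.client.SampleNotPresentException;
import org.apache.accumulo.core.client.sample.SamplerConfiguration;
import org.apache.accumulo.core.file.FileSKVIterator;
import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;

class SampleReaderSketch {
    // Swap the full reader for its sample view, failing fast when the file carries no matching sample.
    static FileSKVIterator sampleOf(FileSKVIterator reader, SamplerConfiguration requested) {
        SamplerConfigurationImpl requestedImpl = new SamplerConfigurationImpl(requested);
        FileSKVIterator sample = reader.getSample(requestedImpl);
        if (sample == null) {
            throw new SampleNotPresentException();
        }
        return sample;
    }
}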

Example 3 with TabletFile

Use of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.

From the class TabletFileTest, the method testNormalizePath:

@Test
public void testNormalizePath() {
    String uglyVolume = "hdfs://nn.somewhere.com:86753/accumulo/blah/.././/bad/bad2/../.././/////";
    String metadataEntry = uglyVolume + "/tables/" + id + "/" + dir + "/" + filename;
    TabletFile uglyFile = test(metadataEntry, "hdfs://nn.somewhere.com:86753/accumulo", id, dir, filename);
    TabletFile niceFile = new StoredTabletFile("hdfs://nn.somewhere.com:86753/accumulo/tables/" + id + "/" + dir + "/" + filename);
    assertEquals(niceFile, uglyFile);
    assertEquals(niceFile.hashCode(), uglyFile.hashCode());
}
Also used : StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile) TabletFile(org.apache.accumulo.core.metadata.TabletFile) Test(org.junit.jupiter.api.Test)
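
The same normalization guarantee can be exercised directly. The paths and class name below are illustrative, with the redundant "." and ".." segments placed in the volume portion exactly as in the test above.

import org.apache.accumulo.core.metadata.StoredTabletFile;
import org.apache.accumulo.core.metadata.TabletFile;
import org.apache.hadoop.fs.Path;

class NormalizationSketch {
    public static void main(String[] args) {
        TabletFile ugly = new TabletFile(new Path("hdfs://nn:8020/accumulo/blah/.././/tables/2a/t-0001/F0001.rf"));
        StoredTabletFile nice = new StoredTabletFile("hdfs://nn:8020/accumulo/tables/2a/t-0001/F0001.rf");
        // both normalize to the same path, so they compare equal and share a hash code
        System.out.println(nice.equals(ugly) && nice.hashCode() == ugly.hashCode());
    }
}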

Example 4 with TabletFile

Use of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.

From the class VolumeUtil, the method updateTabletVolumes:

/**
 * This method does two things. First, it switches any volumes a tablet is using that are
 * configured in instance.volumes.replacements. Second, if a tablet dir is no longer configured
 * for use, it chooses a new tablet directory.
 */
public static TabletFiles updateTabletVolumes(ServerContext context, ServiceLock zooLock, KeyExtent extent, TabletFiles tabletFiles, boolean replicate) {
    List<Pair<Path, Path>> replacements = context.getVolumeReplacements();
    if (replacements.isEmpty())
        return tabletFiles;
    log.trace("Using volume replacements: {}", replacements);
    List<LogEntry> logsToRemove = new ArrayList<>();
    List<LogEntry> logsToAdd = new ArrayList<>();
    List<StoredTabletFile> filesToRemove = new ArrayList<>();
    SortedMap<TabletFile, DataFileValue> filesToAdd = new TreeMap<>();
    TabletFiles ret = new TabletFiles();
    for (LogEntry logEntry : tabletFiles.logEntries) {
        LogEntry switchedLogEntry = switchVolumes(logEntry, replacements);
        if (switchedLogEntry != null) {
            logsToRemove.add(logEntry);
            logsToAdd.add(switchedLogEntry);
            ret.logEntries.add(switchedLogEntry);
            log.debug("Replacing volume {} : {} -> {}", extent, logEntry.filename, switchedLogEntry.filename);
        } else {
            ret.logEntries.add(logEntry);
        }
    }
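    // switch volumes for data files, tracking removals and additions for the metadata update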
    for (Entry<StoredTabletFile, DataFileValue> entry : tabletFiles.datafiles.entrySet()) {
        String metaPath = entry.getKey().getMetaUpdateDelete();
        Path switchedPath = switchVolume(metaPath, FileType.TABLE, replacements);
        if (switchedPath != null) {
            filesToRemove.add(entry.getKey());
            TabletFile switchedFile = new TabletFile(switchedPath);
            filesToAdd.put(switchedFile, entry.getValue());
            ret.datafiles.put(switchedFile.insert(), entry.getValue());
            log.debug("Replacing volume {} : {} -> {}", extent, metaPath, switchedPath);
        } else {
            ret.datafiles.put(entry.getKey(), entry.getValue());
        }
    }
    if (logsToRemove.size() + filesToRemove.size() > 0) {
        MetadataTableUtil.updateTabletVolumes(extent, logsToRemove, logsToAdd, filesToRemove, filesToAdd, zooLock, context);
        if (replicate) {
            @SuppressWarnings("deprecation") Status status = org.apache.accumulo.server.replication.StatusUtil.fileClosed();
            log.debug("Tablet directory switched, need to record old log files {} {}", logsToRemove, ProtobufUtil.toString(status));
            // Before deleting these logs, we need to mark them for replication
            for (LogEntry logEntry : logsToRemove) {
                ReplicationTableUtil.updateFiles(context, extent, logEntry.filename, status);
            }
        }
    }
    // this method should return the exact strings that are in the metadata table
    ret.dirName = tabletFiles.dirName;
    return ret;
}
Also used : Path(org.apache.hadoop.fs.Path) Status(org.apache.accumulo.server.replication.proto.Replication.Status) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) ArrayList(java.util.ArrayList) TreeMap(java.util.TreeMap) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile) TabletFile(org.apache.accumulo.core.metadata.TabletFile) LogEntry(org.apache.accumulo.core.tabletserver.log.LogEntry) Pair(org.apache.accumulo.core.util.Pair)
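
A minimal sketch of the per-file step above in isolation: a switched path becomes a TabletFile, and insert() produces the StoredTabletFile written back to the metadata table. The path and class name are illustrative; a real switch would come from switchVolume(...) against instance.volumes.replacements.

import org.apache.accumulo.core.metadata.StoredTabletFile;
import org.apache.accumulo.core.metadata.TabletFile;
import org.apache.hadoop.fs.Path;

class VolumeSwitchSketch {
    public static void main(String[] args) {
        // pretend switchVolume(...) mapped the file onto the replacement volume
        Path switchedPath = new Path("hdfs://new-nn:8020/accumulo/tables/2a/t-0001/F0001.rf");
        TabletFile switchedFile = new TabletFile(switchedPath);
        // insert() gives the stored form used for the new metadata entry
        StoredTabletFile forMetadata = switchedFile.insert();
        System.out.println(forMetadata.getMetaUpdateDelete());
    }
}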

Example 5 with TabletFile

Use of org.apache.accumulo.core.metadata.TabletFile in the Apache Accumulo project.

From the class FileCompactor, the method openMapDataFiles:

private List<SortedKeyValueIterator<Key, Value>> openMapDataFiles(ArrayList<FileSKVIterator> readers) throws IOException {
    List<SortedKeyValueIterator<Key, Value>> iters = new ArrayList<>(filesToCompact.size());
    for (TabletFile mapFile : filesToCompact.keySet()) {
        try {
            FileOperations fileFactory = FileOperations.getInstance();
            FileSystem fs = this.fs.getFileSystemByPath(mapFile.getPath());
            FileSKVIterator reader;
            reader = fileFactory.newReaderBuilder().forFile(mapFile.getPathStr(), fs, fs.getConf(), context.getCryptoService()).withTableConfiguration(acuTableConf).withRateLimiter(env.getReadLimiter()).build();
            readers.add(reader);
            SortedKeyValueIterator<Key, Value> iter = new ProblemReportingIterator(context, extent.tableId(), mapFile.getPathStr(), false, reader);
            if (filesToCompact.get(mapFile).isTimeSet()) {
                iter = new TimeSettingIterator(iter, filesToCompact.get(mapFile).getTime());
            }
            iters.add(iter);
        } catch (Exception e) {
            ProblemReports.getInstance(context).report(new ProblemReport(extent.tableId(), ProblemType.FILE_READ, mapFile.getPathStr(), e));
            log.warn("Some problem opening map file {} {}", mapFile, e.getMessage(), e);
            // failed to open some map file... close the ones that were opened
            for (FileSKVIterator reader : readers) {
                try {
                    reader.close();
                } catch (Exception e2) {
                    log.warn("Failed to close map file", e2);
                }
            }
            readers.clear();
            if (e instanceof IOException)
                throw (IOException) e;
            throw new IOException("Failed to open map data files", e);
        }
    }
    return iters;
}
Also used : FileSKVIterator(org.apache.accumulo.core.file.FileSKVIterator) ArrayList(java.util.ArrayList) FileOperations(org.apache.accumulo.core.file.FileOperations) SortedKeyValueIterator(org.apache.accumulo.core.iterators.SortedKeyValueIterator) TimeSettingIterator(org.apache.accumulo.core.iteratorsImpl.system.TimeSettingIterator) IOException(java.io.IOException) ProblemReport(org.apache.accumulo.server.problems.ProblemReport) FileSystem(org.apache.hadoop.fs.FileSystem) ProblemReportingIterator(org.apache.accumulo.server.problems.ProblemReportingIterator) Value(org.apache.accumulo.core.data.Value) DataFileValue(org.apache.accumulo.core.metadata.schema.DataFileValue) StoredTabletFile(org.apache.accumulo.core.metadata.StoredTabletFile) TabletFile(org.apache.accumulo.core.metadata.TabletFile) Key(org.apache.accumulo.core.data.Key)
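
A minimal sketch of just the reader-opening step, using the default crypto service and configuration in place of the compactor's per-table configuration and rate limiter; the class name is hypothetical and the import paths are assumed.

import java.io.IOException;

import org.apache.accumulo.core.conf.DefaultConfiguration;
import org.apache.accumulo.core.crypto.CryptoServiceFactory;
import org.apache.accumulo.core.file.FileOperations;
import org.apache.accumulo.core.file.FileSKVIterator;
import org.apache.accumulo.core.metadata.TabletFile;
import org.apache.accumulo.core.volume.VolumeConfiguration;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

class ReaderOpenSketch {
    static FileSKVIterator open(TabletFile mapFile) throws IOException {
        Configuration conf = new Configuration();
        // resolve the FileSystem from the file's own path, as the compactor does through its VolumeManager
        FileSystem fs = VolumeConfiguration.fileSystemForPath(mapFile.getPathStr(), conf);
        return FileOperations.getInstance().newReaderBuilder()
            .forFile(mapFile.getPathStr(), fs, conf, CryptoServiceFactory.newDefaultInstance())
            .withTableConfiguration(DefaultConfiguration.getInstance())
            .build();
    }
}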

Aggregations

TabletFile (org.apache.accumulo.core.metadata.TabletFile): 36
StoredTabletFile (org.apache.accumulo.core.metadata.StoredTabletFile): 20
IOException (java.io.IOException): 17
Path (org.apache.hadoop.fs.Path): 15
ArrayList (java.util.ArrayList): 14
DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 14
Key (org.apache.accumulo.core.data.Key): 13
FileSystem (org.apache.hadoop.fs.FileSystem): 13
HashMap (java.util.HashMap): 12
TreeMap (java.util.TreeMap): 10
Value (org.apache.accumulo.core.data.Value): 8
KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent): 8
HashSet (java.util.HashSet): 6
AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration): 6
FileSKVIterator (org.apache.accumulo.core.file.FileSKVIterator): 6
SortedKeyValueIterator (org.apache.accumulo.core.iterators.SortedKeyValueIterator): 6
Text (org.apache.hadoop.io.Text): 6
Map (java.util.Map): 5
PartialKey (org.apache.accumulo.core.data.PartialKey): 5
List (java.util.List): 4