Use of org.apache.accumulo.core.metadata.TabletFile in project accumulo by apache.
The class TabletMetadata, method convertRow:
@VisibleForTesting
public static TabletMetadata convertRow(Iterator<Entry<Key, Value>> rowIter, EnumSet<ColumnType> fetchedColumns, boolean buildKeyValueMap) {
  Objects.requireNonNull(rowIter);
  TabletMetadata te = new TabletMetadata();
  final ImmutableSortedMap.Builder<Key, Value> kvBuilder = buildKeyValueMap ? ImmutableSortedMap.naturalOrder() : null;
  final var filesBuilder = ImmutableMap.<StoredTabletFile, DataFileValue>builder();
  final var scansBuilder = ImmutableList.<StoredTabletFile>builder();
  final var logsBuilder = ImmutableList.<LogEntry>builder();
  final var extCompBuilder = ImmutableMap.<ExternalCompactionId, ExternalCompactionMetadata>builder();
  final var loadedFilesBuilder = ImmutableMap.<TabletFile, Long>builder();
  ByteSequence row = null;
  while (rowIter.hasNext()) {
    final Entry<Key, Value> kv = rowIter.next();
    final Key key = kv.getKey();
    final String val = kv.getValue().toString();
    final String fam = key.getColumnFamilyData().toString();
    final String qual = key.getColumnQualifierData().toString();
    if (buildKeyValueMap) {
      kvBuilder.put(key, kv.getValue());
    }
    if (row == null) {
      row = key.getRowData();
      KeyExtent ke = KeyExtent.fromMetaRow(key.getRow());
      te.endRow = ke.endRow();
      te.tableId = ke.tableId();
    } else if (!row.equals(key.getRowData())) {
      throw new IllegalArgumentException("Input contains more than one row : " + row + " " + key.getRowData());
    }
    // dispatch on column family, then on qualifier within the family
    switch (fam) {
      case TabletColumnFamily.STR_NAME:
        switch (qual) {
          case PREV_ROW_QUAL:
            te.prevEndRow = TabletColumnFamily.decodePrevEndRow(kv.getValue());
            te.sawPrevEndRow = true;
            break;
          case OLD_PREV_ROW_QUAL:
            te.oldPrevEndRow = TabletColumnFamily.decodePrevEndRow(kv.getValue());
            te.sawOldPrevEndRow = true;
            break;
          case SPLIT_RATIO_QUAL:
            te.splitRatio = Double.parseDouble(val);
            break;
        }
        break;
      case ServerColumnFamily.STR_NAME:
        switch (qual) {
          case DIRECTORY_QUAL:
            Preconditions.checkArgument(ServerColumnFamily.isValidDirCol(val), "Saw invalid dir name %s %s", key, val);
            te.dirName = val;
            break;
          case TIME_QUAL:
            te.time = MetadataTime.parse(val);
            break;
          case FLUSH_QUAL:
            te.flush = OptionalLong.of(Long.parseLong(val));
            break;
          case COMPACT_QUAL:
            te.compact = OptionalLong.of(Long.parseLong(val));
            break;
        }
        break;
      case DataFileColumnFamily.STR_NAME:
        filesBuilder.put(new StoredTabletFile(qual), new DataFileValue(val));
        break;
      case BulkFileColumnFamily.STR_NAME:
        loadedFilesBuilder.put(new StoredTabletFile(qual), BulkFileColumnFamily.getBulkLoadTid(val));
        break;
      case CurrentLocationColumnFamily.STR_NAME:
        te.setLocationOnce(val, qual, LocationType.CURRENT);
        break;
      case FutureLocationColumnFamily.STR_NAME:
        te.setLocationOnce(val, qual, LocationType.FUTURE);
        break;
      case LastLocationColumnFamily.STR_NAME:
        te.last = new Location(val, qual, LocationType.LAST);
        break;
      case SuspendLocationColumn.STR_NAME:
        te.suspend = SuspendingTServer.fromValue(kv.getValue());
        break;
      case ScanFileColumnFamily.STR_NAME:
        scansBuilder.add(new StoredTabletFile(qual));
        break;
      case ClonedColumnFamily.STR_NAME:
        te.cloned = val;
        break;
      case LogColumnFamily.STR_NAME:
        logsBuilder.add(LogEntry.fromMetaWalEntry(kv));
        break;
      case ExternalCompactionColumnFamily.STR_NAME:
        extCompBuilder.put(ExternalCompactionId.of(qual), ExternalCompactionMetadata.fromJson(val));
        break;
      default:
        throw new IllegalStateException("Unexpected family " + fam);
    }
  }
  te.files = filesBuilder.build();
  te.loadedFiles = loadedFilesBuilder.build();
  te.fetchedCols = fetchedColumns;
  te.scans = scansBuilder.build();
  te.logs = logsBuilder.build();
  te.extCompactions = extCompBuilder.build();
  if (buildKeyValueMap) {
    te.keyValues = kvBuilder.build();
  }
  return te;
}
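For context, here is a minimal sketch of driving convertRow from a client-side scan of the metadata table. Note that convertRow is @VisibleForTesting, so production code normally goes through higher-level metadata readers; the client setup, the "4<" metadata row, and the printed accessors below are illustrative assumptions, not part of the method above.

// Hypothetical usage sketch; client.properties and the "4<" row are
// placeholders. Classes come from org.apache.accumulo.core.
try (AccumuloClient client = Accumulo.newClient().from("client.properties").build();
    Scanner scanner = client.createScanner(MetadataTable.NAME, Authorizations.EMPTY)) {
  // convertRow rejects input spanning more than one row, so restrict the
  // scan to a single tablet's metadata row.
  scanner.setRange(new Range("4<"));
  TabletMetadata tm = TabletMetadata.convertRow(scanner.iterator(),
      EnumSet.allOf(ColumnType.class), true);
  System.out.println(tm.getTableId() + " " + tm.getDirName());
} catch (TableNotFoundException e) {
  throw new RuntimeException(e);
}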
Use of org.apache.accumulo.core.metadata.TabletFile in project accumulo by apache.
The class OfflineIterator, method createIterator:
private SortedKeyValueIterator<Key, Value> createIterator(KeyExtent extent, Collection<StoredTabletFile> absFiles) throws TableNotFoundException, AccumuloException, IOException {
  // possible race condition here, if table is renamed
  String tableName = context.getTableName(tableId);
  AccumuloConfiguration acuTableConf = new ConfigurationCopy(context.tableOperations().getConfiguration(tableName));
  Configuration conf = context.getHadoopConf();
  for (SortedKeyValueIterator<Key, Value> reader : readers) {
    ((FileSKVIterator) reader).close();
  }
  readers.clear();
  SamplerConfiguration scannerSamplerConfig = options.getSamplerConfiguration();
  SamplerConfigurationImpl scannerSamplerConfigImpl = scannerSamplerConfig == null ? null : new SamplerConfigurationImpl(scannerSamplerConfig);
  SamplerConfigurationImpl samplerConfImpl = SamplerConfigurationImpl.newSamplerConfig(acuTableConf);
  if (scannerSamplerConfigImpl != null && !scannerSamplerConfigImpl.equals(samplerConfImpl)) {
    throw new SampleNotPresentException();
  }
  for (TabletFile file : absFiles) {
    FileSystem fs = VolumeConfiguration.fileSystemForPath(file.getPathStr(), conf);
    FileSKVIterator reader = FileOperations.getInstance().newReaderBuilder().forFile(file.getPathStr(), fs, conf, CryptoServiceFactory.newDefaultInstance()).withTableConfiguration(acuTableConf).build();
    if (scannerSamplerConfigImpl != null) {
      reader = reader.getSample(scannerSamplerConfigImpl);
      if (reader == null) {
        throw new SampleNotPresentException();
      }
    }
    readers.add(reader);
  }
  MultiIterator multiIter = new MultiIterator(readers, extent);
  OfflineIteratorEnvironment iterEnv = new OfflineIteratorEnvironment(authorizations, acuTableConf, false, samplerConfImpl == null ? null : samplerConfImpl.toSamplerConfiguration());
  byte[] defaultSecurityLabel;
  ColumnVisibility cv = new ColumnVisibility(acuTableConf.get(Property.TABLE_DEFAULT_SCANTIME_VISIBILITY));
  defaultSecurityLabel = cv.getExpression();
  SortedKeyValueIterator<Key, Value> visFilter = SystemIteratorUtil.setupSystemScanIterators(multiIter, new HashSet<>(options.fetchedColumns), authorizations, defaultSecurityLabel, acuTableConf);
  IterLoad iterLoad = IterConfigUtil.loadIterConf(IteratorScope.scan, options.serverSideIteratorList, options.serverSideIteratorOptions, acuTableConf);
  return iterEnv.getTopLevelIterator(IterConfigUtil.loadIterators(visFilter, iterLoad.iterEnv(iterEnv).useAccumuloClassLoader(false)));
}
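The sampler comparison near the top is what surfaces to users as SampleNotPresentException. A hedged sketch of the client side that feeds scannerSamplerConfig, assuming a table configured with the built-in RowSampler; the hasher and modulus option values are illustrative:

// Hypothetical client-side setup; the options must match the table's
// table.sampler.* configuration, otherwise createIterator throws
// SampleNotPresentException rather than returning unsampled data.
SamplerConfiguration samplerConfig =
    new SamplerConfiguration(RowSampler.class.getName());
samplerConfig.addOption("hasher", "murmur3_32");
samplerConfig.addOption("modulus", "1009");
scanner.setSamplerConfiguration(samplerConfig);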
Use of org.apache.accumulo.core.metadata.TabletFile in project accumulo by apache.
The class TabletFileTest, method testNormalizePath:
@Test
public void testNormalizePath() {
  String uglyVolume = "hdfs://nn.somewhere.com:86753/accumulo/blah/.././/bad/bad2/../.././/////";
  String metadataEntry = uglyVolume + "/tables/" + id + "/" + dir + "/" + filename;
  TabletFile uglyFile = test(metadataEntry, "hdfs://nn.somewhere.com:86753/accumulo", id, dir, filename);
  TabletFile niceFile = new StoredTabletFile("hdfs://nn.somewhere.com:86753/accumulo/tables/" + id + "/" + dir + "/" + filename);
  assertEquals(niceFile, uglyFile);
  assertEquals(niceFile.hashCode(), uglyFile.hashCode());
}
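Beyond equality, normalization shows up in the accessors. A small sketch, assuming a well-formed metadata entry; the table id, tablet directory, and file name below are illustrative values, not ones from the test:

// Hypothetical values; StoredTabletFile parses the volume, table id,
// tablet dir, and file name out of the normalized metadata path.
StoredTabletFile file = new StoredTabletFile(
    "hdfs://nn.somewhere.com:86753/accumulo/tables/2a/t-0003/F0004.rf");
System.out.println(file.getTableId());  // 2a
System.out.println(file.getFileName()); // F0004.rf
System.out.println(file.getPathStr());  // the full normalized URI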
Use of org.apache.accumulo.core.metadata.TabletFile in project accumulo by apache.
The class VolumeUtil, method updateTabletVolumes:
/**
 * This method does two things. First, it switches any volumes a tablet is using that are
 * configured in instance.volumes.replacements. Second, if a tablet dir is no longer configured
 * for use, it chooses a new tablet directory.
 */
public static TabletFiles updateTabletVolumes(ServerContext context, ServiceLock zooLock, KeyExtent extent, TabletFiles tabletFiles, boolean replicate) {
  List<Pair<Path, Path>> replacements = context.getVolumeReplacements();
  if (replacements.isEmpty()) {
    return tabletFiles;
  }
  log.trace("Using volume replacements: {}", replacements);
  List<LogEntry> logsToRemove = new ArrayList<>();
  List<LogEntry> logsToAdd = new ArrayList<>();
  List<StoredTabletFile> filesToRemove = new ArrayList<>();
  SortedMap<TabletFile, DataFileValue> filesToAdd = new TreeMap<>();
  TabletFiles ret = new TabletFiles();
  for (LogEntry logEntry : tabletFiles.logEntries) {
    LogEntry switchedLogEntry = switchVolumes(logEntry, replacements);
    if (switchedLogEntry != null) {
      logsToRemove.add(logEntry);
      logsToAdd.add(switchedLogEntry);
      ret.logEntries.add(switchedLogEntry);
      log.debug("Replacing volume {} : {} -> {}", extent, logEntry.filename, switchedLogEntry.filename);
    } else {
      ret.logEntries.add(logEntry);
    }
  }
  for (Entry<StoredTabletFile, DataFileValue> entry : tabletFiles.datafiles.entrySet()) {
    String metaPath = entry.getKey().getMetaUpdateDelete();
    Path switchedPath = switchVolume(metaPath, FileType.TABLE, replacements);
    if (switchedPath != null) {
      filesToRemove.add(entry.getKey());
      TabletFile switchedFile = new TabletFile(switchedPath);
      filesToAdd.put(switchedFile, entry.getValue());
      ret.datafiles.put(switchedFile.insert(), entry.getValue());
      log.debug("Replacing volume {} : {} -> {}", extent, metaPath, switchedPath);
    } else {
      ret.datafiles.put(entry.getKey(), entry.getValue());
    }
  }
  if (logsToRemove.size() + filesToRemove.size() > 0) {
    MetadataTableUtil.updateTabletVolumes(extent, logsToRemove, logsToAdd, filesToRemove, filesToAdd, zooLock, context);
    if (replicate) {
      @SuppressWarnings("deprecation")
      Status status = org.apache.accumulo.server.replication.StatusUtil.fileClosed();
      log.debug("Tablet directory switched, need to record old log files {} {}", logsToRemove, ProtobufUtil.toString(status));
      // Before deleting these logs, we need to mark them for replication
      for (LogEntry logEntry : logsToRemove) {
        ReplicationTableUtil.updateFiles(context, extent, logEntry.filename, status);
      }
    }
  }
  // this method should return the exact strings that are in the metadata table
  ret.dirName = tabletFiles.dirName;
  return ret;
}
Use of org.apache.accumulo.core.metadata.TabletFile in project accumulo by apache.
The class FileCompactor, method openMapDataFiles:
private List<SortedKeyValueIterator<Key, Value>> openMapDataFiles(ArrayList<FileSKVIterator> readers) throws IOException {
  List<SortedKeyValueIterator<Key, Value>> iters = new ArrayList<>(filesToCompact.size());
  for (TabletFile mapFile : filesToCompact.keySet()) {
    try {
      FileOperations fileFactory = FileOperations.getInstance();
      FileSystem fs = this.fs.getFileSystemByPath(mapFile.getPath());
      FileSKVIterator reader = fileFactory.newReaderBuilder().forFile(mapFile.getPathStr(), fs, fs.getConf(), context.getCryptoService()).withTableConfiguration(acuTableConf).withRateLimiter(env.getReadLimiter()).build();
      readers.add(reader);
      SortedKeyValueIterator<Key, Value> iter = new ProblemReportingIterator(context, extent.tableId(), mapFile.getPathStr(), false, reader);
      if (filesToCompact.get(mapFile).isTimeSet()) {
        iter = new TimeSettingIterator(iter, filesToCompact.get(mapFile).getTime());
      }
      iters.add(iter);
    } catch (Exception e) {
      ProblemReports.getInstance(context).report(new ProblemReport(extent.tableId(), ProblemType.FILE_READ, mapFile.getPathStr(), e));
      log.warn("Some problem opening map file {} {}", mapFile, e.getMessage(), e);
      // failed to open some map file... close the ones that were opened
      for (FileSKVIterator reader : readers) {
        try {
          reader.close();
        } catch (Exception e2) {
          log.warn("Failed to close map file", e2);
        }
      }
      readers.clear();
      if (e instanceof IOException) {
        throw (IOException) e;
      }
      throw new IOException("Failed to open map data files", e);
    }
  }
  return iters;
}
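The method deliberately leaves successfully opened readers in the caller-supplied list, cleaning up itself only when an open fails. A minimal sketch of the cleanup contract this implies on the calling side; the surrounding structure is an assumption for illustration, not FileCompactor's actual call site:

// Hypothetical caller; for failures after openMapDataFiles returns,
// every reader it added is still closed exactly once.
ArrayList<FileSKVIterator> readers = new ArrayList<>();
try {
  List<SortedKeyValueIterator<Key, Value>> iters = openMapDataFiles(readers);
  // ... wrap iters in a MultiIterator and run the compaction ...
} finally {
  for (FileSKVIterator reader : readers) {
    try {
      reader.close();
    } catch (IOException e) {
      log.warn("Failed to close map file", e);
    }
  }
}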