Example 26 with FileRef

Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.

From the class MetadataTableUtil, the method updateTabletDataFile:

public static void updateTabletDataFile(long tid, KeyExtent extent, Map<FileRef, DataFileValue> estSizes, String time, ClientContext context, ZooLock zooLock) {
    Mutation m = new Mutation(extent.getMetadataEntry());
    byte[] tidBytes = Long.toString(tid).getBytes(UTF_8);
    // record each file under both the data-file family (encoded size/entry estimates)
    // and the bulk-file family (tagged with the bulk transaction id)
    for (Entry<FileRef, DataFileValue> entry : estSizes.entrySet()) {
        Text file = entry.getKey().meta();
        m.put(DataFileColumnFamily.NAME, file, new Value(entry.getValue().encode()));
        m.put(TabletsSection.BulkFileColumnFamily.NAME, file, new Value(tidBytes));
    }
    // set the tablet's time entry in the same mutation, then apply it
    TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes(UTF_8)));
    update(context, zooLock, m, extent);
}
Also used: DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), FileRef (org.apache.accumulo.server.fs.FileRef), Value (org.apache.accumulo.core.data.Value), Text (org.apache.hadoop.io.Text), Mutation (org.apache.accumulo.core.data.Mutation)
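
A hypothetical caller might look like the sketch below. The table id, file path, size estimates, and time value are invented for illustration, and context and zooLock are assumed to be supplied by the surrounding server code; the FileRef(String), DataFileValue(long, long), and KeyExtent(Table.ID, Text, Text) constructors match this era of the codebase but are worth verifying against the exact version in use.

// Hypothetical usage sketch: register one bulk-imported file against a whole-table tablet.
// `context` and `zooLock` are assumed to come from the calling server component.
KeyExtent extent = new KeyExtent(Table.ID.of("2"), null, null);
Map<FileRef, DataFileValue> estSizes = new HashMap<>();
estSizes.put(new FileRef("hdfs://nn:8020/accumulo/tables/2/default_tablet/I0000abc.rf"),
    // estimated size in bytes, estimated entry count
    new DataFileValue(10 * 1024 * 1024, 50000));
MetadataTableUtil.updateTabletDataFile(42L, extent, estSizes, "M1234567890", context, zooLock);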

Example 27 with FileRef

Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.

From the class MetadataTableUtil, the method deleteTable:

public static void deleteTable(Table.ID tableId, boolean insertDeletes, ClientContext context, ZooLock lock) throws AccumuloException, IOException {
    try (Scanner ms = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
        BatchWriter bw = new BatchWriterImpl(context, MetadataTable.ID, new BatchWriterConfig().setMaxMemory(1000000).setMaxLatency(120000L, TimeUnit.MILLISECONDS).setMaxWriteThreads(2))) {
        // scan metadata for our table and delete everything we find
        Mutation m = null;
        ms.setRange(new KeyExtent(tableId, null, null).toMetadataRange());
        // insert deletes before deleting data from metadata... this makes the code fault tolerant
        if (insertDeletes) {
            ms.fetchColumnFamily(DataFileColumnFamily.NAME);
            TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.fetch(ms);
            for (Entry<Key, Value> cell : ms) {
                Key key = cell.getKey();
                if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
                    FileRef ref = new FileRef(VolumeManagerImpl.get(), key);
                    bw.addMutation(createDeleteMutation(tableId, ref.meta().toString()));
                }
                if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
                    bw.addMutation(createDeleteMutation(tableId, cell.getValue().toString()));
                }
            }
            bw.flush();
            ms.clearColumns();
        }
        for (Entry<Key, Value> cell : ms) {
            Key key = cell.getKey();
            if (m == null) {
                m = new Mutation(key.getRow());
                if (lock != null)
                    putLockID(lock, m);
            }
            if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
                bw.addMutation(m);
                m = new Mutation(key.getRow());
                if (lock != null)
                    putLockID(lock, m);
            }
            m.putDelete(key.getColumnFamily(), key.getColumnQualifier());
        }
        if (m != null)
            bw.addMutation(m);
    }
}
Also used: IsolatedScanner (org.apache.accumulo.core.client.IsolatedScanner), Scanner (org.apache.accumulo.core.client.Scanner), ScannerImpl (org.apache.accumulo.core.client.impl.ScannerImpl), FileRef (org.apache.accumulo.server.fs.FileRef), BatchWriterImpl (org.apache.accumulo.core.client.impl.BatchWriterImpl), Value (org.apache.accumulo.core.data.Value), DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), BatchWriterConfig (org.apache.accumulo.core.client.BatchWriterConfig), BatchWriter (org.apache.accumulo.core.client.BatchWriter), Mutation (org.apache.accumulo.core.data.Mutation), KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent), Key (org.apache.accumulo.core.data.Key), PartialKey (org.apache.accumulo.core.data.PartialKey)
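
The method is deliberately two-pass: it first writes file and directory delete markers (so the garbage collector can reclaim the data even if the process dies partway through), flushes them, and only then removes the metadata rows themselves. A hypothetical invocation, assuming a live context and the caller's ZooLock, might look like this; the retry comment reflects a reading of the delete-markers-first design rather than a documented guarantee.

// Hypothetical usage sketch: drop all metadata for table id "5",
// inserting GC delete markers first for fault tolerance.
try {
    MetadataTableUtil.deleteTable(Table.ID.of("5"), true, context, lock);
} catch (AccumuloException | IOException e) {
    // delete markers were written and flushed before row deletion began,
    // so retrying after a failure should be safe (assumption, per the two-pass design)
    log.error("Metadata cleanup for table 5 failed", e);
}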

Example 28 with FileRef

Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.

From the class FileUtil, the method tryToGetFirstAndLastRows:

public static Map<FileRef, FileInfo> tryToGetFirstAndLastRows(VolumeManager fs, AccumuloConfiguration acuConf, Set<FileRef> mapfiles) {
    HashMap<FileRef, FileInfo> mapFilesInfo = new HashMap<>();
    long t1 = System.currentTimeMillis();
    for (FileRef mapfile : mapfiles) {
        FileSKVIterator reader = null;
        FileSystem ns = fs.getVolumeByPath(mapfile.path()).getFileSystem();
        try {
            reader = FileOperations.getInstance().newReaderBuilder().forFile(mapfile.toString(), ns, ns.getConf()).withTableConfiguration(acuConf).build();
            Key firstKey = reader.getFirstKey();
            if (firstKey != null) {
                mapFilesInfo.put(mapfile, new FileInfo(firstKey, reader.getLastKey()));
            }
        } catch (IOException ioe) {
            log.warn("Failed to read map file to determine first and last key : " + mapfile, ioe);
        } finally {
            if (reader != null) {
                try {
                    reader.close();
                } catch (IOException ioe) {
                    log.warn("failed to close " + mapfile, ioe);
                }
            }
        }
    }
    long t2 = System.currentTimeMillis();
    log.debug(String.format("Found first and last keys for %d map files in %6.2f secs", mapfiles.size(), (t2 - t1) / 1000.0));
    return mapFilesInfo;
}
Also used: FileSKVIterator (org.apache.accumulo.core.file.FileSKVIterator), FileRef (org.apache.accumulo.server.fs.FileRef), HashMap (java.util.HashMap), FileSystem (org.apache.hadoop.fs.FileSystem), IOException (java.io.IOException), Key (org.apache.accumulo.core.data.Key), PartialKey (org.apache.accumulo.core.data.PartialKey)
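
The returned map is typically consumed to work out the row range a set of files covers. A minimal sketch follows, assuming fs and acuConf are already available; the FileInfo accessor names (getFirstRow/getLastRow) are from memory and should be checked against the FileUtil.FileInfo class.

// Hypothetical usage sketch: log the row range covered by each file.
Set<FileRef> files = new HashSet<>();
files.add(new FileRef("hdfs://nn:8020/accumulo/tables/2/t-0001/F0000def.rf"));
Map<FileRef, FileUtil.FileInfo> ranges = FileUtil.tryToGetFirstAndLastRows(fs, acuConf, files);
for (Entry<FileRef, FileUtil.FileInfo> e : ranges.entrySet()) {
    // getFirstRow()/getLastRow() are assumed accessors on FileUtil.FileInfo
    log.debug("{} covers rows {} .. {}", e.getKey(), e.getValue().getFirstRow(), e.getValue().getLastRow());
}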

Example 29 with FileRef

Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.

From the class FileUtilTest, the method testToPathStrings:

@Test
public void testToPathStrings() {
    Collection<FileRef> c = new ArrayList<>();
    FileRef r1 = createMock(FileRef.class);
    expect(r1.path()).andReturn(new Path("/foo"));
    replay(r1);
    c.add(r1);
    FileRef r2 = createMock(FileRef.class);
    expect(r2.path()).andReturn(new Path("/bar"));
    replay(r2);
    c.add(r2);
    Collection<String> cs = FileUtil.toPathStrings(c);
    Assert.assertEquals(2, cs.size());
    Iterator<String> iter = cs.iterator();
    Assert.assertEquals("/foo", iter.next());
    Assert.assertEquals("/bar", iter.next());
}
Also used: Path (org.apache.hadoop.fs.Path), FileRef (org.apache.accumulo.server.fs.FileRef), ArrayList (java.util.ArrayList), Test (org.junit.Test)
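
The test pins down the method's contract: each FileRef maps to the string form of its path(), preserving iteration order. An implementation satisfying that contract could be as small as the following sketch (an illustration written against the contract, not the upstream FileUtil code):

// Illustrative implementation of the contract the test exercises.
public static Collection<String> toPathStrings(Collection<FileRef> refs) {
    ArrayList<String> paths = new ArrayList<>(refs.size());
    for (FileRef ref : refs)
        paths.add(ref.path().toString());
    return paths;
}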

Example 30 with FileRef

Use of org.apache.accumulo.server.fs.FileRef in project accumulo by apache.

From the class Compactor, the method openMapDataFiles:

private List<SortedKeyValueIterator<Key, Value>> openMapDataFiles(String lgName, ArrayList<FileSKVIterator> readers) throws IOException {
    List<SortedKeyValueIterator<Key, Value>> iters = new ArrayList<>(filesToCompact.size());
    for (FileRef mapFile : filesToCompact.keySet()) {
        try {
            FileOperations fileFactory = FileOperations.getInstance();
            FileSystem fs = this.fs.getVolumeByPath(mapFile.path()).getFileSystem();
            FileSKVIterator reader = fileFactory.newReaderBuilder().forFile(mapFile.path().toString(), fs, fs.getConf()).withTableConfiguration(acuTableConf).withRateLimiter(env.getReadLimiter()).build();
            readers.add(reader);
            SortedKeyValueIterator<Key, Value> iter = new ProblemReportingIterator(context, extent.getTableId(), mapFile.path().toString(), false, reader);
            if (filesToCompact.get(mapFile).isTimeSet()) {
                iter = new TimeSettingIterator(iter, filesToCompact.get(mapFile).getTime());
            }
            iters.add(iter);
        } catch (Throwable e) {
            ProblemReports.getInstance(context).report(new ProblemReport(extent.getTableId(), ProblemType.FILE_READ, mapFile.path().toString(), e));
            log.warn("Some problem opening map file {} {}", mapFile, e.getMessage(), e);
            // failed to open some map file... close the ones that were opened
            for (FileSKVIterator reader : readers) {
                try {
                    reader.close();
                } catch (Throwable e2) {
                    log.warn("Failed to close map file", e2);
                }
            }
            readers.clear();
            if (e instanceof IOException)
                throw (IOException) e;
            throw new IOException("Failed to open map data files", e);
        }
    }
    return iters;
}
Also used: FileSKVIterator (org.apache.accumulo.core.file.FileSKVIterator), ArrayList (java.util.ArrayList), FileOperations (org.apache.accumulo.core.file.FileOperations), SortedKeyValueIterator (org.apache.accumulo.core.iterators.SortedKeyValueIterator), TimeSettingIterator (org.apache.accumulo.core.iterators.system.TimeSettingIterator), IOException (java.io.IOException), ProblemReport (org.apache.accumulo.server.problems.ProblemReport), FileRef (org.apache.accumulo.server.fs.FileRef), FileSystem (org.apache.hadoop.fs.FileSystem), ProblemReportingIterator (org.apache.accumulo.server.problems.ProblemReportingIterator), Value (org.apache.accumulo.core.data.Value), DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), Key (org.apache.accumulo.core.data.Key)
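
Note the cleanup idiom: each successfully opened reader is added to the caller-supplied readers list immediately, so when opening file N fails the catch block can close files 1 through N-1 before rethrowing. Extracted into a standalone helper, the idiom looks roughly like this sketch (closeAllQuietly is a hypothetical name, not an Accumulo method):

// Generic restatement of the close-everything-on-failure idiom used above.
static void closeAllQuietly(List<? extends AutoCloseable> opened) {
    for (AutoCloseable resource : opened) {
        try {
            resource.close();
        } catch (Exception e) {
            // keep going: one failed close must not leak the remaining resources
        }
    }
    opened.clear();
}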

Aggregations

FileRef (org.apache.accumulo.server.fs.FileRef): 62
DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 36
Value (org.apache.accumulo.core.data.Value): 17
Key (org.apache.accumulo.core.data.Key): 16
ArrayList (java.util.ArrayList): 15
HashMap (java.util.HashMap): 13
KeyExtent (org.apache.accumulo.core.data.impl.KeyExtent): 13
IOException (java.io.IOException): 12
Test (org.junit.Test): 12
Text (org.apache.hadoop.io.Text): 11
Mutation (org.apache.accumulo.core.data.Mutation): 10
VolumeManager (org.apache.accumulo.server.fs.VolumeManager): 10
Scanner (org.apache.accumulo.core.client.Scanner): 9
PartialKey (org.apache.accumulo.core.data.PartialKey): 9
TreeMap (java.util.TreeMap): 8
FileSystem (org.apache.hadoop.fs.FileSystem): 8
Path (org.apache.hadoop.fs.Path): 8
HashSet (java.util.HashSet): 7
IsolatedScanner (org.apache.accumulo.core.client.IsolatedScanner): 6
ScannerImpl (org.apache.accumulo.core.client.impl.ScannerImpl): 6