
Example 11 with StoredTabletFile

Use of org.apache.accumulo.core.metadata.StoredTabletFile in the Apache Accumulo project.

From the class CompactableImplTest, method newECM.

private static ExternalCompactionMetadata newECM(Set<StoredTabletFile> jobFiles, Set<StoredTabletFile> nextFiles, CompactionKind kind, boolean propagateDeletes, boolean initiallySelectedAll, Long compactionId) {
    TabletFile compactTmpName = newFile("C00000A.rf_tmp");
    String compactorId = "cid";
    short priority = 9;
    CompactionExecutorId ceid = CompactionExecutorIdImpl.externalId("ecs1");
    return new ExternalCompactionMetadata(jobFiles, nextFiles, compactTmpName, compactorId, kind, priority, ceid, propagateDeletes, initiallySelectedAll, compactionId);
}
Also used: ExternalCompactionMetadata (org.apache.accumulo.core.metadata.schema.ExternalCompactionMetadata), StoredTabletFile (org.apache.accumulo.core.metadata.StoredTabletFile), TabletFile (org.apache.accumulo.core.metadata.TabletFile), CompactionExecutorId (org.apache.accumulo.core.spi.compaction.CompactionExecutorId)
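
For orientation, here is a minimal sketch (not part of the test above) of how newECM might be invoked. The file paths and argument values are illustrative, and the StoredTabletFile constructor is the one used in the later examples:

Set<StoredTabletFile> jobFiles = Set.of(
    new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-1/F00001.rf"),
    new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-1/F00002.rf"));
Set<StoredTabletFile> nextFiles = Set.of(
    new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-1/F00003.rf"));
// Metadata for an external compaction of jobFiles: propagate deletes, not
// initially selected-all, no user compaction id (all illustrative values).
ExternalCompactionMetadata ecm =
    newECM(jobFiles, nextFiles, CompactionKind.SYSTEM, true, false, null);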

Example 12 with StoredTabletFile

Use of org.apache.accumulo.core.metadata.StoredTabletFile in the Apache Accumulo project.

From the class CompactionPlanTest, method testInputNotInAllFiles.

@Test
public void testInputNotInAllFiles() {
    CompactionPlan cp1 = new CompactionPlan();
    StoredTabletFile fr1 = new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-1/1.rf");
    StoredTabletFile fr2 = new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-1/2.rf");
    StoredTabletFile fr3 = new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-2/3.rf");
    cp1.inputFiles.add(fr1);
    cp1.inputFiles.add(fr2);
    cp1.inputFiles.add(fr3);
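    // allFiles below deliberately omits fr3, so the plan references a file
    // the tablet does not have.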
    Set<StoredTabletFile> allFiles = Set.of(fr1, fr2);
    assertThrows(IllegalStateException.class, () -> cp1.validate(allFiles));
}
Also used: StoredTabletFile (org.apache.accumulo.core.metadata.StoredTabletFile), Test (org.junit.Test)

Example 13 with StoredTabletFile

Use of org.apache.accumulo.core.metadata.StoredTabletFile in the Apache Accumulo project.

From the class CompactionPlanTest, method testDeleteNotInAllFiles.

@Test
public void testDeleteNotInAllFiles() {
    CompactionPlan cp1 = new CompactionPlan();
    StoredTabletFile fr1 = new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-1/1.rf");
    StoredTabletFile fr2 = new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-1/2.rf");
    StoredTabletFile fr3 = new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-2/3.rf");
    cp1.deleteFiles.add(fr1);
    cp1.deleteFiles.add(fr2);
    cp1.deleteFiles.add(fr3);
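    // As above, fr3 is missing from allFiles, so validation must fail.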
    Set<StoredTabletFile> allFiles = Set.of(fr1, fr2);
    assertThrows(IllegalStateException.class, () -> cp1.validate(allFiles));
}
Also used: StoredTabletFile (org.apache.accumulo.core.metadata.StoredTabletFile), Test (org.junit.Test)
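
As a hypothetical counterpart to the two failing tests above (not in the Accumulo sources), a plan whose input and delete files are all contained in the tablet's file set should validate without throwing:

@Test
public void testAllFilesPresent() {
    CompactionPlan cp1 = new CompactionPlan();
    StoredTabletFile fr1 = new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-1/1.rf");
    StoredTabletFile fr2 = new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-1/2.rf");
    cp1.inputFiles.add(fr1);
    cp1.deleteFiles.add(fr2);
    // Both files are in allFiles (and the input and delete sets are disjoint),
    // so validate is expected to return normally.
    cp1.validate(Set.of(fr1, fr2));
}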

Example 14 with StoredTabletFile

Use of org.apache.accumulo.core.metadata.StoredTabletFile in the Apache Accumulo project.

From the class SplitRecoveryIT, method runSplitRecoveryTest.

private void runSplitRecoveryTest(ServerContext context, int failPoint, String mr, int extentToSplit, ServiceLock zl, KeyExtent... extents) throws Exception {
    Text midRow = new Text(mr);
    SortedMap<StoredTabletFile, DataFileValue> splitMapFiles = null;
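    // For each extent: create the tablet, register one data file for it in the
    // metadata table, and remember the stored files of the extent to be split.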
    for (int i = 0; i < extents.length; i++) {
        KeyExtent extent = extents[i];
        String dirName = "dir_" + i;
        String tdir = context.getTablesDirs().iterator().next() + "/" + extent.tableId() + "/" + dirName;
        MetadataTableUtil.addTablet(extent, dirName, context, TimeType.LOGICAL, zl);
        SortedMap<TabletFile, DataFileValue> mapFiles = new TreeMap<>();
        mapFiles.put(new TabletFile(new Path(tdir + "/" + RFile.EXTENSION + "_000_000")), new DataFileValue(1000017 + i, 10000 + i));
        int tid = 0;
        TransactionWatcher.ZooArbitrator.start(context, Constants.BULK_ARBITRATOR_TYPE, tid);
        SortedMap<StoredTabletFile, DataFileValue> storedFiles = new TreeMap<>(MetadataTableUtil.updateTabletDataFile(tid, extent, mapFiles, new MetadataTime(0, TimeType.LOGICAL), context, zl));
        if (i == extentToSplit) {
            splitMapFiles = storedFiles;
        }
    }
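    // Split the selected extent into high and low halves at midRow, then drive
    // recovery through the configured fail point.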
    KeyExtent extent = extents[extentToSplit];
    KeyExtent high = new KeyExtent(extent.tableId(), extent.endRow(), midRow);
    KeyExtent low = new KeyExtent(extent.tableId(), midRow, extent.prevEndRow());
    splitPartiallyAndRecover(context, extent, high, low, .4, splitMapFiles, midRow, "localhost:1234", failPoint, zl);
}
Also used: Path (org.apache.hadoop.fs.Path), DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), Text (org.apache.hadoop.io.Text), StoredTabletFile (org.apache.accumulo.core.metadata.StoredTabletFile), TabletFile (org.apache.accumulo.core.metadata.TabletFile), TreeMap (java.util.TreeMap), KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent), MetadataTime (org.apache.accumulo.core.metadata.schema.MetadataTime)
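
The interplay of TabletFile and StoredTabletFile above reflects their roles: a TabletFile is referenced by its full path, while a StoredTabletFile represents a file as recorded in the metadata table (here, the map returned by MetadataTableUtil.updateTabletDataFile). A minimal sketch of the distinction, using only the constructors already shown in these examples:

// A file not yet recorded in the metadata table is referenced by path alone.
TabletFile candidate = new TabletFile(new Path("hdfs://nn1/accumulo/tables/1/t-1/F00001.rf"));
// Once persisted, the same file is tracked as a StoredTabletFile, constructed
// from the string under which it appears in the metadata table.
StoredTabletFile stored = new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-1/F00001.rf");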

Example 15 with StoredTabletFile

Use of org.apache.accumulo.core.metadata.StoredTabletFile in the Apache Accumulo project.

From the class Compactor, method createCompactionJob.

/**
 * Creates the runnable that executes the given compaction job
 *
 * @param job
 *          compaction job
 * @param totalInputEntries
 *          object to capture total entries
 * @param totalInputBytes
 *          object to capture input file size
 * @param started
 *          started latch
 * @param stopped
 *          stopped latch
 * @param err
 *          reference to error
 * @return the compaction job as a Runnable
 */
protected Runnable createCompactionJob(final TExternalCompactionJob job, final LongAdder totalInputEntries, final LongAdder totalInputBytes, final CountDownLatch started, final CountDownLatch stopped, final AtomicReference<Throwable> err) {
    return new Runnable() {

        @Override
        public void run() {
            // It's only expected that a single compaction runs at a time. Multiple compactions running
            // at a time could cause odd behavior like out of order and unexpected thrift calls to the
            // coordinator. This is a sanity check to ensure the expectation is met. Should this check
            // ever fail, it means there is a bug elsewhere.
            Preconditions.checkState(compactionRunning.compareAndSet(false, true));
            try {
                LOG.info("Starting up compaction runnable for job: {}", job);
                TCompactionStatusUpdate update = new TCompactionStatusUpdate(TCompactionState.STARTED, "Compaction started", -1, -1, -1);
                updateCompactionState(job, update);
                final AccumuloConfiguration tConfig;
                var extent = KeyExtent.fromThrift(job.getExtent());
                if (!job.getOverrides().isEmpty()) {
                    tConfig = new ConfigurationCopy(getContext().getTableConfiguration(extent.tableId()));
                    job.getOverrides().forEach((k, v) -> ((ConfigurationCopy) tConfig).set(k, v));
                    LOG.debug("Overriding table properties with {}", job.getOverrides());
                } else {
                    tConfig = getContext().getTableConfiguration(extent.tableId());
                }
                final TabletFile outputFile = new TabletFile(new Path(job.getOutputFile()));
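                // Convert each thrift input file into its stored metadata form
                // and tally total input entries and bytes for the stats below.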
                final Map<StoredTabletFile, DataFileValue> files = new TreeMap<>();
                job.getFiles().forEach(f -> {
                    files.put(new StoredTabletFile(f.getMetadataFileEntry()), new DataFileValue(f.getSize(), f.getEntries(), f.getTimestamp()));
                    totalInputEntries.add(f.getEntries());
                    totalInputBytes.add(f.getSize());
                });
                final List<IteratorSetting> iters = new ArrayList<>();
                job.getIteratorSettings().getIterators().forEach(tis -> iters.add(SystemIteratorUtil.toIteratorSetting(tis)));
                ExtCEnv cenv = new ExtCEnv(JOB_HOLDER, queueName);
                FileCompactor compactor = new FileCompactor(getContext(), extent, files, outputFile, job.isPropagateDeletes(), cenv, iters, tConfig);
                LOG.trace("Starting compactor");
                started.countDown();
                org.apache.accumulo.server.compaction.CompactionStats stat = compactor.call();
                TCompactionStats cs = new TCompactionStats();
                cs.setEntriesRead(stat.getEntriesRead());
                cs.setEntriesWritten(stat.getEntriesWritten());
                cs.setFileSize(stat.getFileSize());
                JOB_HOLDER.setStats(cs);
                LOG.info("Compaction completed successfully {} ", job.getExternalCompactionId());
                // Update state when completed
                TCompactionStatusUpdate update2 = new TCompactionStatusUpdate(TCompactionState.SUCCEEDED, "Compaction completed successfully", -1, -1, -1);
                updateCompactionState(job, update2);
            } catch (Exception e) {
                LOG.error("Compaction failed", e);
                err.set(e);
            } finally {
                stopped.countDown();
                Preconditions.checkState(compactionRunning.compareAndSet(true, false));
            }
        }
    };
}
Also used: Path (org.apache.hadoop.fs.Path), ConfigurationCopy (org.apache.accumulo.core.conf.ConfigurationCopy), DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue), TCompactionStatusUpdate (org.apache.accumulo.core.compaction.thrift.TCompactionStatusUpdate), ArrayList (java.util.ArrayList), TreeMap (java.util.TreeMap), TTransportException (org.apache.thrift.transport.TTransportException), TableNotFoundException (org.apache.accumulo.core.client.TableNotFoundException), ThriftSecurityException (org.apache.accumulo.core.clientImpl.thrift.ThriftSecurityException), RetriesExceededException (org.apache.accumulo.server.compaction.RetryableThriftCall.RetriesExceededException), AccumuloSecurityException (org.apache.accumulo.core.client.AccumuloSecurityException), KeeperException (org.apache.zookeeper.KeeperException), TException (org.apache.thrift.TException), IOException (java.io.IOException), UnknownHostException (java.net.UnknownHostException), ThriftTableOperationException (org.apache.accumulo.core.clientImpl.thrift.ThriftTableOperationException), UnknownCompactionIdException (org.apache.accumulo.core.compaction.thrift.UnknownCompactionIdException), IteratorSetting (org.apache.accumulo.core.client.IteratorSetting), TCompactionStats (org.apache.accumulo.core.tabletserver.thrift.TCompactionStats), FileCompactor (org.apache.accumulo.server.compaction.FileCompactor), StoredTabletFile (org.apache.accumulo.core.metadata.StoredTabletFile), TabletFile (org.apache.accumulo.core.metadata.TabletFile), AccumuloConfiguration (org.apache.accumulo.core.conf.AccumuloConfiguration)
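
The configuration handling in this example follows a copy-then-override pattern: the shared table configuration is wrapped in a ConfigurationCopy only when the job carries per-table property overrides. A minimal standalone sketch, assuming a hypothetical overrides map of property names to values:

// Wrap the shared table configuration so per-job overrides cannot mutate it.
AccumuloConfiguration base = getContext().getTableConfiguration(extent.tableId());
ConfigurationCopy jobConfig = new ConfigurationCopy(base);
// Apply each override onto the private copy only.
overrides.forEach(jobConfig::set);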

Aggregations

StoredTabletFile (org.apache.accumulo.core.metadata.StoredTabletFile): 47
DataFileValue (org.apache.accumulo.core.metadata.schema.DataFileValue): 25
TabletFile (org.apache.accumulo.core.metadata.TabletFile): 18
IOException (java.io.IOException): 12
KeyExtent (org.apache.accumulo.core.dataImpl.KeyExtent): 11
HashMap (java.util.HashMap): 9
HashSet (java.util.HashSet): 9
Key (org.apache.accumulo.core.data.Key): 9
ArrayList (java.util.ArrayList): 8
TreeMap (java.util.TreeMap): 8
Value (org.apache.accumulo.core.data.Value): 8
Path (org.apache.hadoop.fs.Path): 7
Text (org.apache.hadoop.io.Text): 7
Pair (org.apache.accumulo.core.util.Pair): 6
MajorCompactionRequest (org.apache.accumulo.tserver.compaction.MajorCompactionRequest): 6
Test (org.junit.Test): 6
LogEntry (org.apache.accumulo.core.tabletserver.log.LogEntry): 5
UncheckedIOException (java.io.UncheckedIOException): 4
CompactionConfig (org.apache.accumulo.core.client.admin.CompactionConfig): 4
TServerInstance (org.apache.accumulo.core.metadata.TServerInstance): 4