use of com.google.common.util.concurrent.RateLimiter in project cassandra by apache.
the class CompactionManager method antiCompactGroup.
@VisibleForTesting
int antiCompactGroup(ColumnFamilyStore cfs, RangesAtEndpoint ranges, LifecycleTransaction txn, UUID pendingRepair, BooleanSupplier isCancelled) {
Preconditions.checkArgument(!ranges.isEmpty(), "need at least one full or transient range");
long groupMaxDataAge = -1;
for (Iterator<SSTableReader> i = txn.originals().iterator(); i.hasNext(); ) {
SSTableReader sstable = i.next();
if (groupMaxDataAge < sstable.maxDataAge)
groupMaxDataAge = sstable.maxDataAge;
}
if (txn.originals().size() == 0) {
logger.info("No valid anticompactions for this group, All sstables were compacted and are no longer available");
return 0;
}
logger.info("Anticompacting {} in {}.{} for {}", txn.originals(), cfs.keyspace.getName(), cfs.getTableName(), pendingRepair);
Set<SSTableReader> sstableAsSet = txn.originals();
File destination = cfs.getDirectories().getWriteableLocationAsFile(cfs.getExpectedCompactedFileSize(sstableAsSet, OperationType.ANTICOMPACTION));
int nowInSec = FBUtilities.nowInSeconds();
RateLimiter limiter = getRateLimiter();
/**
* HACK WARNING
*
* We have multiple writers operating over the same Transaction, producing different sets of sstables that all
* logically replace the transaction's originals. The SSTableRewriter assumes it has exclusive control over
* the transaction state, and this will lead to temporarily inconsistent sstable/tracker state if we do not
* take special measures to avoid it.
*
* Specifically, if a number of rewriters have prepareToCommit() invoked in sequence, then two problematic things happen:
* 1. The obsoleteOriginals() call of the first rewriter immediately removes the originals from the tracker, despite
* their having been only partially replaced. To avoid this, we must suppress either obsoleteOriginals() or checkpoint().
* 2. The LifecycleTransaction may only have prepareToCommit() invoked once, and this will checkpoint() also.
*
* Similarly commit() would finalise partially complete on-disk state.
*
* To avoid these problems, we introduce a SharedTxn that proxies all calls onto the underlying transaction
* except prepareToCommit(), checkpoint(), obsoleteOriginals(), and commit().
* We then invoke these methods directly once each of the rewriters has updated the transaction
* with its share of replacements.
*
* Note that for the same essential reason we also explicitly disable early open.
* By noop-ing checkpoint we avoid any of the problems with early open, but by continuing to explicitly
* disable it we also prevent any of the extra associated work from being performed.
*/
class SharedTxn extends WrappedLifecycleTransaction {
public SharedTxn(ILifecycleTransaction delegate) {
super(delegate);
}
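// The overrides below are deliberate no-ops (see the HACK WARNING above): prepareToCommit(),
// checkpoint(), obsoleteOriginals() and commit() are each invoked exactly once on the underlying
// transaction, after every rewriter has registered its replacements. close() is likewise a no-op
// so that the try-with-resources block does not close the shared underlying transaction.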
public Throwable commit(Throwable accumulate) {
return accumulate;
}
public void prepareToCommit() {
}
public void checkpoint() {
}
public void obsoleteOriginals() {
}
public void close() {
}
}
CompactionStrategyManager strategy = cfs.getCompactionStrategyManager();
try (SharedTxn sharedTxn = new SharedTxn(txn);
SSTableRewriter fullWriter = SSTableRewriter.constructWithoutEarlyOpening(sharedTxn, false, groupMaxDataAge);
SSTableRewriter transWriter = SSTableRewriter.constructWithoutEarlyOpening(sharedTxn, false, groupMaxDataAge);
SSTableRewriter unrepairedWriter = SSTableRewriter.constructWithoutEarlyOpening(sharedTxn, false, groupMaxDataAge);
AbstractCompactionStrategy.ScannerList scanners = strategy.getScanners(txn.originals());
CompactionController controller = new CompactionController(cfs, sstableAsSet, getDefaultGcBefore(cfs, nowInSec));
CompactionIterator ci = getAntiCompactionIterator(scanners.scanners, controller, nowInSec, UUIDGen.getTimeUUID(), active, isCancelled)) {
int expectedBloomFilterSize = Math.max(cfs.metadata().params.minIndexInterval, (int) (SSTableReader.getApproximateKeyCount(sstableAsSet)));
fullWriter.switchWriter(CompactionManager.createWriterForAntiCompaction(cfs, destination, expectedBloomFilterSize, UNREPAIRED_SSTABLE, pendingRepair, false, sstableAsSet, txn));
transWriter.switchWriter(CompactionManager.createWriterForAntiCompaction(cfs, destination, expectedBloomFilterSize, UNREPAIRED_SSTABLE, pendingRepair, true, sstableAsSet, txn));
unrepairedWriter.switchWriter(CompactionManager.createWriterForAntiCompaction(cfs, destination, expectedBloomFilterSize, UNREPAIRED_SSTABLE, NO_PENDING_REPAIR, false, sstableAsSet, txn));
Predicate<Token> fullChecker = !ranges.onlyFull().isEmpty() ? new Range.OrderedRangeContainmentChecker(ranges.onlyFull().ranges()) : t -> false;
Predicate<Token> transChecker = !ranges.onlyTransient().isEmpty() ? new Range.OrderedRangeContainmentChecker(ranges.onlyTransient().ranges()) : t -> false;
double compressionRatio = scanners.getCompressionRatio();
if (compressionRatio == MetadataCollector.NO_COMPRESSION_RATIO)
compressionRatio = 1.0;
long lastBytesScanned = 0;
while (ci.hasNext()) {
try (UnfilteredRowIterator partition = ci.next()) {
Token token = partition.partitionKey().getToken();
// if this row is contained in the full or transient ranges, append it to the appropriate sstable
if (fullChecker.test(token)) {
fullWriter.append(partition);
} else if (transChecker.test(token)) {
transWriter.append(partition);
} else {
// otherwise, append it to the unrepaired sstable
unrepairedWriter.append(partition);
}
long bytesScanned = scanners.getTotalBytesScanned();
compactionRateLimiterAcquire(limiter, bytesScanned, lastBytesScanned, compressionRatio);
lastBytesScanned = bytesScanned;
}
}
fullWriter.prepareToCommit();
transWriter.prepareToCommit();
unrepairedWriter.prepareToCommit();
txn.checkpoint();
txn.obsoleteOriginals();
txn.prepareToCommit();
List<SSTableReader> fullSSTables = new ArrayList<>(fullWriter.finished());
List<SSTableReader> transSSTables = new ArrayList<>(transWriter.finished());
List<SSTableReader> unrepairedSSTables = new ArrayList<>(unrepairedWriter.finished());
fullWriter.commit();
transWriter.commit();
unrepairedWriter.commit();
txn.commit();
logger.info("Anticompacted {} in {}.{} to full = {}, transient = {}, unrepaired = {} for {}", sstableAsSet, cfs.keyspace.getName(), cfs.getTableName(), fullSSTables, transSSTables, unrepairedSSTables, pendingRepair);
return fullSSTables.size() + transSSTables.size() + unrepairedSSTables.size();
} catch (Throwable e) {
if (e instanceof CompactionInterruptedException && isCancelled.getAsBoolean()) {
logger.info("Anticompaction has been canceled for session {}", pendingRepair);
logger.trace(e.getMessage(), e);
} else {
JVMStabilityInspector.inspectThrowable(e);
logger.error("Error anticompacting " + txn + " for " + pendingRepair, e);
}
throw e;
}
}
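For context, the compactionRateLimiterAcquire(...) call in the loop above throttles anticompaction by the number of bytes scanned since the previous partition, scaled by the scanners' compression ratio so the limit applies to on-disk (compressed) throughput. A minimal sketch of that idea, with an assumed helper name and scaling rather than the verbatim Cassandra implementation:
import com.google.common.util.concurrent.RateLimiter;

class ScanThrottle {
    // Hypothetical helper: acquire permits for the compressed bytes read since the last call.
    // RateLimiter.acquire(int) blocks until that many permits (bytes, at the configured rate) are available.
    static void acquireForScannedBytes(RateLimiter limiter, long bytesScanned, long lastBytesScanned, double compressionRatio) {
        long lengthRead = (long) ((bytesScanned - lastBytesScanned) * compressionRatio) + 1;
        while (lengthRead >= Integer.MAX_VALUE) {
            limiter.acquire(Integer.MAX_VALUE);
            lengthRead -= Integer.MAX_VALUE;
        }
        if (lengthRead > 0) {
            limiter.acquire((int) lengthRead);
        }
    }
}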
use of com.google.common.util.concurrent.RateLimiter in project micro-service by Lovnx.
the class Task method testRateLimiter.
/**
* RateLimiter is conceptually similar to the JDK's Semaphore, but instead of limiting the number of threads that may access a resource concurrently, it limits the rate at which the resource is accessed
*/
public static void testRateLimiter() {
ListeningExecutorService executorService = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());
// Submit no more than 5 tasks per second (the rate configured by RateLimiter.create below)
RateLimiter limiter = RateLimiter.create(5.0);
for (int i = 0; i < 10; i++) {
// Request a permit from the RateLimiter; once the configured rate is exceeded, acquire() blocks until a permit becomes available
limiter.acquire();
final ListenableFuture<Integer> listenableFuture = executorService.submit(new Task("is " + i));
}
}
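For comparison with the blocking acquire() used above, RateLimiter also exposes a non-blocking tryAcquire(). A small, self-contained sketch (not part of the project above) showing both calls:
import com.google.common.util.concurrent.RateLimiter;
import java.util.concurrent.TimeUnit;

public class RateLimiterDemo {
    public static void main(String[] args) {
        RateLimiter limiter = RateLimiter.create(5.0); // 5 permits per second
        for (int i = 0; i < 10; i++) {
            // acquire() blocks until a permit is available and returns the seconds spent waiting
            double waited = limiter.acquire();
            System.out.printf("task %d started after waiting %.2fs%n", i, waited);
        }
        // tryAcquire(timeout, unit) waits at most the given timeout and reports whether a permit was obtained
        boolean gotPermit = limiter.tryAcquire(100, TimeUnit.MILLISECONDS);
        System.out.println("permit obtained within 100ms: " + gotPermit);
    }
}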
use of com.google.common.util.concurrent.RateLimiter in project distributedlog by twitter.
the class TestNonBlockingReadsMultiReader method testMultiReaders.
@Test(timeout = 60000)
public void testMultiReaders() throws Exception {
String name = "distrlog-multireaders";
final RateLimiter limiter = RateLimiter.create(1000);
DistributedLogConfiguration confLocal = new DistributedLogConfiguration();
confLocal.setOutputBufferSize(0);
confLocal.setImmediateFlushEnabled(true);
DistributedLogManager dlmwrite = createNewDLM(confLocal, name);
final AsyncLogWriter writer = dlmwrite.startAsyncLogSegmentNonPartitioned();
FutureUtils.result(writer.write(DLMTestUtil.getLogRecordInstance(0)));
FutureUtils.result(writer.write(DLMTestUtil.getLogRecordInstance(1)));
final AtomicInteger writeCount = new AtomicInteger(2);
DistributedLogManager dlmread = createNewDLM(conf, name);
BKSyncLogReaderDLSN reader0 = (BKSyncLogReaderDLSN) dlmread.getInputStream(0);
try {
ReaderThread[] readerThreads = new ReaderThread[1];
readerThreads[0] = new ReaderThread("reader0-non-blocking", reader0, false);
// readerThreads[1] = new ReaderThread("reader1-non-blocking", reader0, false);
final AtomicBoolean running = new AtomicBoolean(true);
Thread writerThread = new Thread("WriteThread") {
@Override
public void run() {
try {
long txid = 2;
DLSN dlsn = DLSN.InvalidDLSN;
while (running.get()) {
limiter.acquire();
long curTxId = txid++;
dlsn = FutureUtils.result(writer.write(DLMTestUtil.getLogRecordInstance(curTxId)));
writeCount.incrementAndGet();
if (curTxId % 1000 == 0) {
LOG.info("writer write {}", curTxId);
}
}
LOG.info("Completed writing record at {}", dlsn);
Utils.close(writer);
} catch (DLInterruptedException die) {
Thread.currentThread().interrupt();
} catch (IOException e) {
}
}
};
for (ReaderThread rt : readerThreads) {
rt.start();
}
writerThread.start();
TimeUnit.SECONDS.sleep(5);
LOG.info("Stopping writer");
running.set(false);
writerThread.join();
LOG.info("Writer stopped after writing {} records, waiting for reader to complete", writeCount.get());
while (writeCount.get() > (readerThreads[0].getReadCount())) {
LOG.info("Write Count = {}, Read Count = {}, ReadAhead = {}", new Object[] { writeCount.get(), readerThreads[0].getReadCount(), reader0.getReadAheadPosition() });
TimeUnit.MILLISECONDS.sleep(100);
}
assertEquals(writeCount.get(), (readerThreads[0].getReadCount()));
for (ReaderThread readerThread : readerThreads) {
readerThread.stopReading();
}
} finally {
dlmwrite.close();
reader0.close();
dlmread.close();
}
}
use of com.google.common.util.concurrent.RateLimiter in project alluxio by Alluxio.
the class StressJobServiceBench method runLocal.
@Override
public JobServiceBenchTaskResult runLocal() throws Exception {
ExecutorService service = ExecutorServiceFactories.fixedThreadPool("bench-thread", mParameters.mThreads).create();
long timeOutMs = FormatUtils.parseTimeSize(mBaseParameters.mBenchTimeout);
long durationMs = FormatUtils.parseTimeSize(mParameters.mDuration);
long warmupMs = FormatUtils.parseTimeSize(mParameters.mWarmup);
long startMs = mBaseParameters.mStartMs;
if (mBaseParameters.mStartMs == BaseParameters.UNDEFINED_START_MS) {
startMs = CommonUtils.getCurrentMs() + 1000;
}
long endMs = startMs + warmupMs + durationMs;
RateLimiter rateLimiter = RateLimiter.create(mParameters.mTargetThroughput);
BenchContext context = new BenchContext(rateLimiter, startMs, endMs);
List<Callable<Void>> callables = new ArrayList<>(mParameters.mThreads);
for (int dirId = 0; dirId < mParameters.mThreads; dirId++) {
String filePath = String.format("%s/%s/%d", mParameters.mBasePath, mBaseParameters.mId, dirId);
callables.add(new BenchThread(context, filePath));
}
service.invokeAll(callables, timeOutMs, TimeUnit.MILLISECONDS);
service.shutdownNow();
service.awaitTermination(30, TimeUnit.SECONDS);
if (!mBaseParameters.mProfileAgent.isEmpty()) {
context.addAdditionalResult();
}
return context.getResult();
}
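The BenchThread implementation is not shown in this excerpt; presumably each thread draws a permit from the shared RateLimiter before issuing an operation, so that all threads together respect mTargetThroughput. A hypothetical sketch of that pattern (the class and field names below are assumptions for illustration, not Alluxio's code):
import com.google.common.util.concurrent.RateLimiter;
import java.util.concurrent.Callable;

// Hypothetical worker: issues one operation per acquired permit until the end of the run window.
class PacedWorker implements Callable<Void> {
    private final RateLimiter mRateLimiter;
    private final long mEndMs;
    private final Runnable mOperation;

    PacedWorker(RateLimiter rateLimiter, long endMs, Runnable operation) {
        mRateLimiter = rateLimiter;
        mEndMs = endMs;
        mOperation = operation;
    }

    @Override
    public Void call() {
        // Each acquire() blocks as needed, so the combined rate across all workers stays at the target throughput.
        while (System.currentTimeMillis() < mEndMs) {
            mRateLimiter.acquire();
            mOperation.run();
        }
        return null;
    }
}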
use of com.google.common.util.concurrent.RateLimiter in project web3sdk by FISCO-BCOS.
the class PerfomanceTableInsert method main.
public static void main(String[] args) throws Exception {
try {
String groupId = args[3];
ApplicationContext context = new ClassPathXmlApplicationContext("classpath:applicationContext.xml");
Service service = context.getBean(Service.class);
service.setGroupId(Integer.parseInt(groupId));
service.run();
System.out.println("Start test...");
System.out.println("===================================================================");
ChannelEthereumService channelEthereumService = new ChannelEthereumService();
channelEthereumService.setChannelService(service);
ScheduledExecutorService scheduledExecutorService = Executors.newScheduledThreadPool(500);
Web3j web3 = Web3j.build(channelEthereumService, 15 * 100, scheduledExecutorService, Integer.parseInt(groupId));
Credentials credentials = Credentials.create("b83261efa42895c38c6c2364ca878f43e77f3cddbc922bf57d0d48070f79feb6");
BigInteger gasPrice = new BigInteger("30000000");
BigInteger gasLimit = new BigInteger("30000000");
String command = args[0];
Integer count = 0;
Integer qps = 0;
switch(command) {
case "trans":
count = Integer.parseInt(args[1]);
qps = Integer.parseInt(args[2]);
break;
default:
System.out.println("Args: <trans> <Total> <QPS>");
}
ThreadPoolTaskExecutor threadPool = new ThreadPoolTaskExecutor();
threadPool.setCorePoolSize(200);
threadPool.setMaxPoolSize(500);
threadPool.setQueueCapacity(count);
threadPool.initialize();
System.out.println("Deploying contract...");
TableTest tabletest = TableTest.deploy(web3, credentials, gasPrice, gasLimit).send();
PerfomanceCollector collector = new PerfomanceCollector();
collector.setTotal(count);
RateLimiter limiter = RateLimiter.create(qps);
Integer area = count / 10;
final Integer total = count;
Random random = new Random(System.currentTimeMillis());
System.out.println("Start test,total:" + count);
for (Integer i = 0; i < count; ++i) {
threadPool.execute(new Runnable() {
@Override
public void run() {
limiter.acquire();
PerfomanceTableTestCallback callback = new PerfomanceTableTestCallback();
callback.setCollector(collector);
try {
long _id = getNextID();
tabletest.insert("fruit" + _id % TableTestClient.modevalue, BigInteger.valueOf(_id), "apple" + getId(), callback);
} catch (Exception e) {
TransactionReceipt receipt = new TransactionReceipt();
receipt.setStatus("-1");
callback.onResponse(receipt);
logger.error("Error sending:", e);
}
int current = sended.incrementAndGet();
if (current >= area && ((current % area) == 0)) {
System.out.println("Already sended: " + current + "/" + total + " transactions");
}
}
});
}
} catch (Exception e) {
e.printStackTrace();
System.exit(-1);
}
}