Search in sources :

Example 6 with ThreadLocalRandom

use of java.util.concurrent.ThreadLocalRandom in project pinot by linkedin.

In class ExponentialBackoffRetryPolicy, the method attempt:

/**
 * Runs {@code operation} until it returns {@code true} or the attempt budget is
 * exhausted, sleeping a randomized, exponentially growing interval between attempts.
 *
 * @param operation the operation to retry; returns {@code true} on success
 * @return the result of the final attempt ({@code true} iff the operation succeeded)
 */
@Override
public boolean attempt(Callable<Boolean> operation) {
    try {
        ThreadLocalRandom random = ThreadLocalRandom.current();
        // Sleep-interval window for the next retry; both bounds scale up each attempt.
        long sleepLowerBound = _minimumMilliseconds;
        long sleepUpperBound = (long) (sleepLowerBound * _retryScaleFactor);
        // First attempt happens immediately, with no sleep beforehand.
        boolean succeeded = operation.call();
        for (int attemptsLeft = _maximumAttemptCount - 1; !succeeded && 0 < attemptsLeft; attemptsLeft--) {
            // Jittered back-off: pick a random sleep within the current window.
            long sleepTime = random.nextLong(sleepLowerBound, sleepUpperBound);
            Uninterruptibles.sleepUninterruptibly(sleepTime, TimeUnit.MILLISECONDS);
            succeeded = operation.call();
            // Grow the window geometrically for the next retry.
            sleepLowerBound *= _retryScaleFactor;
            sleepUpperBound *= _retryScaleFactor;
        }
        return succeeded;
    } catch (Exception e) {
        // rethrowException always throws; the return below is unreachable but
        // required by the compiler.
        Utils.rethrowException(e);
        return false;
    }
}
Also used : ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom)

Example 7 with ThreadLocalRandom

use of java.util.concurrent.ThreadLocalRandom in project pinot by linkedin.

In class UploadRefreshDeleteIntegrationTest, the method generateAndUploadRandomSegment1:

/**
 * Generates an Avro file of {@code rowCount} random-int rows for {@code segmentName},
 * builds a segment tar from it, then uploads the tar while a background thread
 * concurrently rebuilds the same segment (to exercise refresh-during-upload behavior).
 *
 * @param segmentName segment name of the form {@code prefix_index}; the numeric suffix
 *                    is parsed as the segment index
 * @param rowCount    number of rows to generate
 * @throws Exception on Avro write or segment build/upload failure
 */
protected void generateAndUploadRandomSegment1(final String segmentName, int rowCount) throws Exception {
    ThreadLocalRandom random = ThreadLocalRandom.current();
    Schema schema = new Schema.Parser().parse(new File(TestUtils.getFileFromResourceUrl(getClass().getClassLoader().getResource("dummy.avsc"))));
    GenericRecord record = new GenericData.Record(schema);
    GenericDatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<GenericRecord>(schema);
    final File avroFile = new File(_tmpDir, segmentName + ".avro");
    // try-with-resources: previously the writer leaked if append() threw.
    try (DataFileWriter<GenericRecord> fileWriter = new DataFileWriter<GenericRecord>(datumWriter)) {
        fileWriter.create(schema, avroFile);
        for (int i = 0; i < rowCount; i++) {
            // Only the first field of the record is populated, with a random int.
            record.put(0, random.nextInt());
            fileWriter.append(record);
        }
    }
    final int segmentIndex = Integer.parseInt(segmentName.split("_")[1]);
    final String TAR_GZ_FILE_EXTENSION = ".tar.gz";
    File segmentTarDir = new File(_tarsDir, segmentName);
    buildSegment(segmentTarDir, avroFile, segmentIndex, segmentName, 0);
    // Locate the produced .tar.gz; fall back to the segment name if none is found.
    String segmentFileName = segmentName;
    for (String name : segmentTarDir.list()) {
        if (name.endsWith(TAR_GZ_FILE_EXTENSION)) {
            segmentFileName = name;
        }
    }
    File file = new File(segmentTarDir, segmentFileName);
    long segmentLength = file.length();
    final File segmentTarDir1 = new File(_tarsDir, segmentName);
    FileUtils.deleteQuietly(segmentTarDir);
    // Rebuild the same segment concurrently with the upload below, deliberately
    // racing the two to test refresh-while-uploading.
    new Thread(new Runnable() {

        @Override
        public void run() {
            try {
                buildSegment(segmentTarDir1, avroFile, segmentIndex, segmentName, 5);
            } catch (Exception e) {
                // Surface background-build failures instead of swallowing them silently;
                // the test thread itself is not failed on purpose (best-effort rebuild).
                e.printStackTrace();
            }
        }
    }).start();
    FileUploadUtils.sendSegmentFile("localhost", "8998", segmentFileName, file, segmentLength, 5, 5);
    avroFile.delete();
    FileUtils.deleteQuietly(segmentTarDir);
}
Also used : Schema(org.apache.avro.Schema) DataFileWriter(org.apache.avro.file.DataFileWriter) GenericDatumWriter(org.apache.avro.generic.GenericDatumWriter) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) GenericRecord(org.apache.avro.generic.GenericRecord) GenericRecord(org.apache.avro.generic.GenericRecord) File(java.io.File)

Example 8 with ThreadLocalRandom

use of java.util.concurrent.ThreadLocalRandom in project pinot by linkedin.

In class UploadRefreshDeleteIntegrationTest, the method generateAndUploadRandomSegment:

/**
 * Generates an Avro file of {@code rowCount} random-int rows for {@code segmentName},
 * builds a segment tar from it on the calling thread, and uploads every produced
 * tar file to the local controller.
 *
 * @param segmentName segment name of the form {@code prefix_index}; the numeric suffix
 *                    is parsed as the segment index
 * @param rowCount    number of rows to generate
 * @throws Exception on Avro write or segment build/upload failure
 */
protected void generateAndUploadRandomSegment(String segmentName, int rowCount) throws Exception {
    ThreadLocalRandom random = ThreadLocalRandom.current();
    Schema schema = new Schema.Parser().parse(new File(TestUtils.getFileFromResourceUrl(getClass().getClassLoader().getResource("dummy.avsc"))));
    GenericRecord record = new GenericData.Record(schema);
    GenericDatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<GenericRecord>(schema);
    File avroFile = new File(_tmpDir, segmentName + ".avro");
    // try-with-resources: previously the writer leaked if append() threw.
    try (DataFileWriter<GenericRecord> fileWriter = new DataFileWriter<GenericRecord>(datumWriter)) {
        fileWriter.create(schema, avroFile);
        for (int i = 0; i < rowCount; i++) {
            // Only the first field of the record is populated, with a random int.
            record.put(0, random.nextInt());
            fileWriter.append(record);
        }
    }
    int segmentIndex = Integer.parseInt(segmentName.split("_")[1]);
    File segmentTarDir = new File(_tarsDir, segmentName);
    ensureDirectoryExistsAndIsEmpty(segmentTarDir);
    // Same-thread executor: the build runs synchronously despite the executor API.
    ExecutorService executor = MoreExecutors.sameThreadExecutor();
    buildSegmentsFromAvro(Collections.singletonList(avroFile), executor, segmentIndex, new File(_segmentsDir, segmentName), segmentTarDir, this.tableName, false, null);
    executor.shutdown();
    executor.awaitTermination(1L, TimeUnit.MINUTES);
    // Upload every tar produced by the build.
    for (String segmentFileName : segmentTarDir.list()) {
        File file = new File(segmentTarDir, segmentFileName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentFileName, file, file.length());
    }
    avroFile.delete();
    FileUtils.deleteQuietly(segmentTarDir);
}
Also used : Schema(org.apache.avro.Schema) DataFileWriter(org.apache.avro.file.DataFileWriter) GenericDatumWriter(org.apache.avro.generic.GenericDatumWriter) ExecutorService(java.util.concurrent.ExecutorService) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) GenericRecord(org.apache.avro.generic.GenericRecord) GenericRecord(org.apache.avro.generic.GenericRecord) File(java.io.File)

Example 9 with ThreadLocalRandom

use of java.util.concurrent.ThreadLocalRandom in project pinot by linkedin.

In class UploadRefreshDeleteIntegrationTest, the method testUploadRefreshDelete:

// Stress test: over several iterations, randomly uploads (refreshes) or deletes a
// fixed pool of segments from worker threads, then asserts the served row count
// converges to the sum of the rows in the segments currently uploaded.
// NOTE(review): the test is disabled (enabled = false).
// NOTE(review): the `tableName` parameter is unused in the body — `this.tableName` is
// read instead, and the delete URL hardcodes "myresource"; confirm that is intended.
@Test(enabled = false, dataProvider = "configProvider")
public void testUploadRefreshDelete(String tableName, SegmentVersion version) throws Exception {
    final int THREAD_COUNT = 1;
    final int SEGMENT_COUNT = 5;
    final int MIN_ROWS_PER_SEGMENT = 500;
    final int MAX_ROWS_PER_SEGMENT = 1000;
    final int OPERATIONS_PER_ITERATION = 10;
    final int ITERATION_COUNT = 5;
    final double UPLOAD_PROBABILITY = 0.8d;
    // segmentRowCounts[i] tracks how many rows segment i currently has (0 = deleted).
    final String[] segmentNames = new String[SEGMENT_COUNT];
    final int[] segmentRowCounts = new int[SEGMENT_COUNT];
    for (int i = 0; i < SEGMENT_COUNT; i++) {
        segmentNames[i] = "segment_" + i;
        segmentRowCounts[i] = 0;
    }
    for (int i = 0; i < ITERATION_COUNT; i++) {
        // Create THREAD_COUNT threads
        ExecutorService executorService = Executors.newFixedThreadPool(THREAD_COUNT);
        // Submit OPERATIONS_PER_ITERATION uploads/deletes
        for (int j = 0; j < OPERATIONS_PER_ITERATION; j++) {
            executorService.submit(new Runnable() {

                @Override
                public void run() {
                    try {
                        ThreadLocalRandom random = ThreadLocalRandom.current();
                        // Pick a random segment
                        int segmentIndex = random.nextInt(SEGMENT_COUNT);
                        String segmentName = segmentNames[segmentIndex];
                        // Pick a random operation
                        if (random.nextDouble() < UPLOAD_PROBABILITY) {
                            // Upload this segment
                            LOGGER.info("Will upload segment {}", segmentName);
                            // Synchronizing on the segment-name String serializes
                            // concurrent operations on the same segment (each name is a
                            // distinct array element, so per-segment locking works here).
                            synchronized (segmentName) {
                                // Create a segment with a random number of rows
                                int segmentRowCount = random.nextInt(MIN_ROWS_PER_SEGMENT, MAX_ROWS_PER_SEGMENT);
                                LOGGER.info("Generating and uploading segment {} with {} rows", segmentName, segmentRowCount);
                                generateAndUploadRandomSegment(segmentName, segmentRowCount);
                                // Store the number of rows
                                LOGGER.info("Uploaded segment {} with {} rows", segmentName, segmentRowCount);
                                segmentRowCounts[segmentIndex] = segmentRowCount;
                            }
                        } else {
                            // Delete this segment
                            LOGGER.info("Will delete segment {}", segmentName);
                            synchronized (segmentName) {
                                // Delete this segment
                                LOGGER.info("Deleting segment {}", segmentName);
                                String reply = sendDeleteRequest(ControllerRequestURLBuilder.baseUrl(CONTROLLER_BASE_API_URL).forSegmentDelete("myresource", segmentName));
                                LOGGER.info("Deletion returned {}", reply);
                                // Set the number of rows to zero
                                LOGGER.info("Deleted segment {}", segmentName);
                                segmentRowCounts[segmentIndex] = 0;
                            }
                        }
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }
            });
        }
        // Await for all tasks to complete
        executorService.shutdown();
        executorService.awaitTermination(5L, TimeUnit.MINUTES);
        // Count number of expected rows
        int expectedRowCount = 0;
        for (int segmentRowCount : segmentRowCounts) {
            expectedRowCount += segmentRowCount;
        }
        // Wait for up to one minute for the row count to match the expected row count
        LOGGER.info("Awaiting for the row count to match {}", expectedRowCount);
        int pinotRowCount = (int) getCurrentServingNumDocs(this.tableName);
        long timeInOneMinute = System.currentTimeMillis() + 60 * 1000L;
        // Poll every 5 seconds until the served count matches or the minute elapses.
        while (System.currentTimeMillis() < timeInOneMinute && pinotRowCount != expectedRowCount) {
            LOGGER.info("Row count is {}, expected {}, awaiting for row count to match", pinotRowCount, expectedRowCount);
            Thread.sleep(5000L);
            try {
                pinotRowCount = (int) getCurrentServingNumDocs(this.tableName);
            } catch (Exception e) {
                // Transient query failures during refresh/delete churn are expected; retry.
                LOGGER.warn("Caught exception while sending query to Pinot, retrying", e);
            }
        }
        // Compare row counts
        Assert.assertEquals(pinotRowCount, expectedRowCount, "Expected and actual row counts don't match after waiting one minute");
    }
}
Also used : ExecutorService(java.util.concurrent.ExecutorService) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) Test(org.testng.annotations.Test)

Example 10 with ThreadLocalRandom

use of java.util.concurrent.ThreadLocalRandom in project neo4j by neo4j.

In class ConsistentPropertyReadsIT, the method shouldReadConsistentPropertyValues:

// Concurrency test: while updater threads repeatedly remove and re-set long string
// properties, reader threads must only ever observe either null or one of the known
// values — never a torn/partial property read.
@Test
public void shouldReadConsistentPropertyValues() throws Throwable {
    // GIVEN
    final Node[] nodes = new Node[10];
    final String[] keys = new String[] { "1", "2", "3" };
    final String[] values = new String[] { longString('a'), longString('b'), longString('c') };
    try (Transaction tx = db.beginTx()) {
        for (int i = 0; i < nodes.length; i++) {
            nodes[i] = db.createNode();
            for (int j = 0; j < keys.length; j++) {
                // All keys start with values[0]; updaters randomize them later.
                nodes[i].setProperty(keys[j], values[0]);
            }
        }
        tx.success();
    }
    int updaters = 10;
    // Readers loop until every updater has finished its 100 iterations.
    final AtomicLong updatersDone = new AtomicLong(updaters);
    Race race = new Race();
    for (int i = 0; i < updaters; i++) {
        // Changers
        race.addContestant(new Runnable() {

            @Override
            public void run() {
                try {
                    ThreadLocalRandom random = ThreadLocalRandom.current();
                    for (int j = 0; j < 100; j++) {
                        Node node = nodes[random.nextInt(nodes.length)];
                        String key = keys[random.nextInt(keys.length)];
                        // Remove and re-set in two separate transactions so readers
                        // can observe the intermediate "property absent" state.
                        try (Transaction tx = db.beginTx()) {
                            node.removeProperty(key);
                            tx.success();
                        }
                        try (Transaction tx = db.beginTx()) {
                            node.setProperty(key, values[random.nextInt(values.length)]);
                            tx.success();
                        }
                    }
                } finally {
                    // Decrement even on failure so readers terminate.
                    updatersDone.decrementAndGet();
                }
            }
        });
    }
    for (int i = 0; i < 100; i++) {
        // Readers
        race.addContestant(new Runnable() {

            @Override
            public void run() {
                ThreadLocalRandom random = ThreadLocalRandom.current();
                while (updatersDone.get() > 0) {
                    try (Transaction tx = db.beginTx()) {
                        // A consistent read is either null (removed) or one of the
                        // known values — anything else indicates a torn read.
                        String value = (String) nodes[random.nextInt(nodes.length)].getProperty(keys[random.nextInt(keys.length)], null);
                        assertTrue(value, value == null || ArrayUtil.contains(values, value));
                        tx.success();
                    }
                }
            }
        });
    }
    // WHEN
    race.go();
}
Also used : AtomicLong(java.util.concurrent.atomic.AtomicLong) Race(org.neo4j.test.Race) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) Test(org.junit.Test)

Aggregations

ThreadLocalRandom (java.util.concurrent.ThreadLocalRandom)186 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)49 Ignite (org.apache.ignite.Ignite)47 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)45 IgniteCache (org.apache.ignite.IgniteCache)33 Test (org.junit.Test)28 ArrayList (java.util.ArrayList)26 IgniteException (org.apache.ignite.IgniteException)26 Transaction (org.apache.ignite.transactions.Transaction)24 CacheException (javax.cache.CacheException)21 HashMap (java.util.HashMap)16 Map (java.util.Map)15 IgniteTransactions (org.apache.ignite.IgniteTransactions)13 CountDownLatch (java.util.concurrent.CountDownLatch)12 TreeMap (java.util.TreeMap)11 Callable (java.util.concurrent.Callable)11 CacheConfiguration (org.apache.ignite.configuration.CacheConfiguration)10 AtomicLong (java.util.concurrent.atomic.AtomicLong)9 LongAdder (java.util.concurrent.atomic.LongAdder)9 ContinuousQuery (org.apache.ignite.cache.query.ContinuousQuery)9