Example 41 with ExecutorService

Use of java.util.concurrent.ExecutorService in project pinot by linkedin.

The class DruidThroughput, method main:

@SuppressWarnings("InfiniteLoopStatement")
public static void main(String[] args) throws Exception {
    final int numQueries = QUERIES.length;
    final Random random = new Random(RANDOM_SEED);
    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicLong totalResponseTime = new AtomicLong(0L);
    final ExecutorService executorService = Executors.newFixedThreadPool(NUM_CLIENTS);
    for (int i = 0; i < NUM_CLIENTS; i++) {
        executorService.submit(new Runnable() {

            @Override
            public void run() {
                try (CloseableHttpClient client = HttpClients.createDefault()) {
                    HttpPost post = new HttpPost("http://localhost:8082/druid/v2/?pretty");
                    post.addHeader("content-type", "application/json");
                    CloseableHttpResponse res;
                    while (true) {
                        try (BufferedReader reader = new BufferedReader(new FileReader(QUERY_FILE_DIR + File.separator + random.nextInt(numQueries) + ".json"))) {
                            int length = reader.read(BUFFER);
                            post.setEntity(new StringEntity(new String(BUFFER, 0, length)));
                        }
                        long start = System.currentTimeMillis();
                        res = client.execute(post);
                        res.close();
                        counter.getAndIncrement();
                        totalResponseTime.getAndAdd(System.currentTimeMillis() - start);
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        });
    }
    long startTime = System.currentTimeMillis();
    while (true) {
        Thread.sleep(REPORT_INTERVAL_MILLIS);
        double timePassedSeconds = ((double) (System.currentTimeMillis() - startTime)) / MILLIS_PER_SECOND;
        int count = counter.get();
        double avgResponseTime = ((double) totalResponseTime.get()) / count;
        System.out.println("Time Passed: " + timePassedSeconds + "s, Query Executed: " + count + ", QPS: " + count / timePassedSeconds + ", Avg Response Time: " + avgResponseTime + "ms");
    }
}
Also used: CloseableHttpClient(org.apache.http.impl.client.CloseableHttpClient) HttpPost(org.apache.http.client.methods.HttpPost) IOException(java.io.IOException) StringEntity(org.apache.http.entity.StringEntity) AtomicLong(java.util.concurrent.atomic.AtomicLong) Random(java.util.Random) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ExecutorService(java.util.concurrent.ExecutorService) CloseableHttpResponse(org.apache.http.client.methods.CloseableHttpResponse) BufferedReader(java.io.BufferedReader) FileReader(java.io.FileReader)
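
The snippet's core pattern, a fixed pool of worker threads driving load while shared atomics track throughput, can be distilled into the following standalone sketch. This is not project code: doWork(), the thread count, and the report interval are placeholder assumptions standing in for the Druid HTTP round trip.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;

public class ThroughputSketch {

    // Placeholder values; the original reads NUM_CLIENTS and REPORT_INTERVAL_MILLIS from constants.
    private static final int NUM_CLIENTS = 10;
    private static final long REPORT_INTERVAL_MILLIS = 3000L;

    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);
        final AtomicLong totalResponseTime = new AtomicLong(0L);
        ExecutorService executorService = Executors.newFixedThreadPool(NUM_CLIENTS);
        for (int i = 0; i < NUM_CLIENTS; i++) {
            executorService.submit(() -> {
                while (true) {
                    long start = System.currentTimeMillis();
                    // Placeholder for one query round trip (an HTTP POST in the original).
                    doWork();
                    counter.getAndIncrement();
                    totalResponseTime.getAndAdd(System.currentTimeMillis() - start);
                }
            });
        }
        long startTime = System.currentTimeMillis();
        while (true) {
            Thread.sleep(REPORT_INTERVAL_MILLIS);
            double timePassedSeconds = (System.currentTimeMillis() - startTime) / 1000.0;
            int count = counter.get();
            // Guard against division by zero before the first query completes.
            double avgResponseTime = count == 0 ? 0.0 : (double) totalResponseTime.get() / count;
            System.out.println("Queries: " + count + ", QPS: " + count / timePassedSeconds
                    + ", Avg Response Time: " + avgResponseTime + "ms");
        }
    }

    private static void doWork() {
        // No-op stand-in for client.execute(post) in the original.
    }
}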

Example 42 with ExecutorService

Use of java.util.concurrent.ExecutorService in project pinot by linkedin.

The class HybridClusterIntegrationTest, method setUp:

@BeforeClass
public void setUp() throws Exception {
    //Clean up
    ensureDirectoryExistsAndIsEmpty(_tmpDir);
    ensureDirectoryExistsAndIsEmpty(_segmentDir);
    ensureDirectoryExistsAndIsEmpty(_tarDir);
    tableName = TABLE_NAME;
    // Start Zk, Kafka and Pinot
    startHybridCluster(10);
    // Unpack the Avro files
    TarGzCompressionUtils.unTar(new File(TestUtils.getFileFromResourceUrl(OfflineClusterIntegrationTest.class.getClassLoader().getResource("On_Time_On_Time_Performance_2014_100k_subset_nonulls.tar.gz"))), _tmpDir);
    _tmpDir.mkdirs();
    final List<File> avroFiles = getAllAvroFiles();
    File schemaFile = getSchemaFile();
    schema = Schema.fromFile(schemaFile);
    addSchema(schemaFile, schema.getSchemaName());
    final List<String> invertedIndexColumns = makeInvertedIndexColumns();
    final String sortedColumn = makeSortedColumn();
    // Create Pinot table
    addHybridTable(tableName, "DaysSinceEpoch", "daysSinceEpoch", KafkaStarterUtils.DEFAULT_ZK_STR, KAFKA_TOPIC, schema.getSchemaName(), TENANT_NAME, TENANT_NAME, avroFiles.get(0), sortedColumn, invertedIndexColumns, null, false);
    LOGGER.info("Running with Sorted column=" + sortedColumn + " and inverted index columns = " + invertedIndexColumns);
    // Create a subset of the first 8 segments (for offline) and the last 6 segments (for realtime)
    final List<File> offlineAvroFiles = getOfflineAvroFiles(avroFiles);
    final List<File> realtimeAvroFiles = getRealtimeAvroFiles(avroFiles);
    // Load data into H2
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);
    // Create segments from Avro data
    LOGGER.info("Creating offline segments from avro files " + offlineAvroFiles);
    buildSegmentsFromAvro(offlineAvroFiles, executor, 0, _segmentDir, _tarDir, tableName, false, null);
    // Initialize query generator
    setupQueryGenerator(avroFiles, executor);
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);
    // Set up a Helix spectator to count the number of segments that are uploaded and unlock the latch once 12 segments are online
    final CountDownLatch latch = new CountDownLatch(1);
    HelixManager manager = HelixManagerFactory.getZKHelixManager(getHelixClusterName(), "test_instance", InstanceType.SPECTATOR, ZkStarter.DEFAULT_ZK_STR);
    manager.connect();
    manager.addExternalViewChangeListener(new ExternalViewChangeListener() {

        @Override
        public void onExternalViewChange(List<ExternalView> externalViewList, NotificationContext changeContext) {
            for (ExternalView externalView : externalViewList) {
                if (externalView.getId().contains(tableName)) {
                    Set<String> partitionSet = externalView.getPartitionSet();
                    if (partitionSet.size() == offlineSegmentCount) {
                        int onlinePartitionCount = 0;
                        for (String partitionId : partitionSet) {
                            Map<String, String> partitionStateMap = externalView.getStateMap(partitionId);
                            if (partitionStateMap.containsValue("ONLINE")) {
                                onlinePartitionCount++;
                            }
                        }
                        if (onlinePartitionCount == offlineSegmentCount) {
                            //                System.out.println("Got " + offlineSegmentCount + " online tables, unlatching the main thread");
                            latch.countDown();
                        }
                    }
                }
            }
        }
    });
    // Upload the segments
    int i = 0;
    for (String segmentName : _tarDir.list()) {
        //      System.out.println("Uploading segment " + (i++) + " : " + segmentName);
        File file = new File(_tarDir, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, file, file.length());
    }
    // Wait for all offline segments to be online
    latch.await();
    // Load realtime data into Kafka
    LOGGER.info("Pushing data from realtime avro files " + realtimeAvroFiles);
    pushAvroIntoKafka(realtimeAvroFiles, KafkaStarterUtils.DEFAULT_KAFKA_BROKER, KAFKA_TOPIC);
    // Wait until the Pinot event count matches with the number of events in the Avro files
    int pinotRecordCount, h2RecordCount;
    long timeInFiveMinutes = System.currentTimeMillis() + 5 * 60 * 1000L;
    Statement statement = _connection.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY);
    statement.execute("select count(*) from " + tableName);
    ResultSet rs = statement.getResultSet();
    rs.first();
    h2RecordCount = rs.getInt(1);
    rs.close();
    waitForRecordCountToStabilizeToExpectedCount(h2RecordCount, timeInFiveMinutes);
}
Also used: ExternalView(org.apache.helix.model.ExternalView) HelixManager(org.apache.helix.HelixManager) ResultSet(java.sql.ResultSet) Set(java.util.Set) Statement(java.sql.Statement) CountDownLatch(java.util.concurrent.CountDownLatch) NotificationContext(org.apache.helix.NotificationContext) ExecutorService(java.util.concurrent.ExecutorService) ExternalViewChangeListener(org.apache.helix.ExternalViewChangeListener) File(java.io.File) Map(java.util.Map) BeforeClass(org.testng.annotations.BeforeClass)
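
The ExecutorService usage here follows a common test-setup idiom: fan independent preparation tasks (H2 loading, segment building, query-generator setup) onto a cached thread pool, then use shutdown() followed by awaitTermination() as a completion barrier before proceeding. A minimal sketch of that barrier, with placeholder task bodies:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class SetupBarrierSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newCachedThreadPool();
        // Placeholder tasks standing in for setupH2AndInsertAvro, buildSegmentsFromAvro, etc.
        executor.submit(() -> System.out.println("load data into H2"));
        executor.submit(() -> System.out.println("build segments from Avro"));
        // No new tasks are accepted after shutdown(); already-submitted tasks keep running.
        executor.shutdown();
        // Block until every task finishes, or fail loudly after the timeout instead of hanging.
        if (!executor.awaitTermination(10, TimeUnit.MINUTES)) {
            throw new IllegalStateException("Setup tasks did not finish in time");
        }
        System.out.println("all setup tasks complete");
    }
}

Note that the original ignores awaitTermination()'s boolean return value; checking it, as above, turns a silent timeout into a visible failure.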

Example 43 with ExecutorService

Use of java.util.concurrent.ExecutorService in project pinot by linkedin.

The class DefaultColumnsClusterIntegrationTest, method setUp:

protected void setUp(boolean sendSchema) throws Exception {
    // Set up directories.
    FileUtils.deleteQuietly(TMP_DIR);
    Assert.assertTrue(TMP_DIR.mkdirs());
    Assert.assertTrue(SEGMENT_DIR.mkdir());
    Assert.assertTrue(TAR_DIR.mkdir());
    // Start the cluster.
    startZk();
    startController();
    startBroker();
    startServer();
    // Create the table.
    addOfflineTable("DaysSinceEpoch", "daysSinceEpoch", -1, "", null, null, "mytable", SegmentVersion.v1);
    // Add the schema.
    if (sendSchema) {
        sendSchema(SCHEMA_WITH_EXTRA_COLUMNS);
    }
    // Unpack the Avro files.
    List<File> avroFiles = unpackAvroData(TMP_DIR, SEGMENT_COUNT);
    // Load data into H2.
    ExecutorService executor = Executors.newCachedThreadPool();
    setupH2AndInsertAvro(avroFiles, executor);
    // Create segments from Avro data.
    buildSegmentsFromAvro(avroFiles, executor, 0, SEGMENT_DIR, TAR_DIR, "mytable", false, null);
    // Initialize query generator.
    setupQueryGenerator(avroFiles, executor);
    executor.shutdown();
    executor.awaitTermination(10, TimeUnit.MINUTES);
    // Set up a Helix spectator to count the number of segments that are uploaded and unlock the latch once 12 segments
    // are online.
    CountDownLatch latch = setupSegmentCountCountDownLatch("mytable", SEGMENT_COUNT);
    // Upload the segments.
    for (String segmentName : TAR_DIR.list()) {
        File file = new File(TAR_DIR, segmentName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentName, file, file.length());
    }
    // Wait for all segments to be ONLINE.
    latch.await();
    waitForSegmentsOnline();
}
Also used: ExecutorService(java.util.concurrent.ExecutorService) CountDownLatch(java.util.concurrent.CountDownLatch) File(java.io.File)
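
As in the previous example, latch.await() blocks the test thread until the Helix spectator observes all segments ONLINE, and it blocks indefinitely. A hedged variant that fails instead of hanging when the cluster never converges (the five-minute timeout is an assumption, not a project constant):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class LatchTimeoutSketch {
    public static void main(String[] args) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        // Stand-in for the Helix external-view listener that counts the latch down.
        new Thread(latch::countDown).start();
        // Fail loudly instead of hanging forever if the event never arrives.
        if (!latch.await(5, TimeUnit.MINUTES)) {
            throw new IllegalStateException("Segments did not come ONLINE in time");
        }
        System.out.println("all segments ONLINE");
    }
}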

Example 44 with ExecutorService

Use of java.util.concurrent.ExecutorService in project pinot by linkedin.

The class StarTreeClusterIntegrationTest, method generateAndUploadSegments:

/**
   * Generate the reference and star tree indexes and upload them to the corresponding tables.
   * @param avroFiles Avro files to build segments from
   * @param tableName table to upload the generated segments to
   * @param starTree whether to build star tree indexes
   * @throws IOException
   * @throws ArchiveException
   * @throws InterruptedException
   */
private void generateAndUploadSegments(List<File> avroFiles, String tableName, boolean starTree) throws IOException, ArchiveException, InterruptedException {
    BaseClusterIntegrationTest.ensureDirectoryExistsAndIsEmpty(_segmentsDir);
    BaseClusterIntegrationTest.ensureDirectoryExistsAndIsEmpty(_tarredSegmentsDir);
    ExecutorService executor = Executors.newCachedThreadPool();
    BaseClusterIntegrationTest.buildSegmentsFromAvro(avroFiles, executor, 0, _segmentsDir, _tarredSegmentsDir, tableName, starTree, getSingleValueColumnsSchema());
    executor.shutdown();
    executor.awaitTermination(TIMEOUT_IN_SECONDS, TimeUnit.SECONDS);
    for (String segmentName : _tarredSegmentsDir.list()) {
        LOGGER.info("Uploading segment {}", segmentName);
        File file = new File(_tarredSegmentsDir, segmentName);
        FileUploadUtils.sendSegmentFile(ControllerTestUtils.DEFAULT_CONTROLLER_HOST, ControllerTestUtils.DEFAULT_CONTROLLER_API_PORT, segmentName, file, file.length());
    }
}
Also used: ExecutorService(java.util.concurrent.ExecutorService) File(java.io.File)
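
buildSegmentsFromAvro presumably submits one build task per Avro file to the pool; the shutdown()/awaitTermination() pair then waits for the whole batch. A generic sketch of that fan-out using Futures, so a failure in any single task surfaces instead of being swallowed (the file names and task body are placeholders):

import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SegmentFanOutSketch {
    public static void main(String[] args) throws Exception {
        List<File> avroFiles = Arrays.asList(new File("part-0.avro"), new File("part-1.avro"));
        ExecutorService executor = Executors.newCachedThreadPool();
        List<Future<?>> futures = new ArrayList<>();
        for (File avroFile : avroFiles) {
            // Placeholder for the per-file segment build.
            futures.add(executor.submit(() -> System.out.println("building segment for " + avroFile)));
        }
        // get() rethrows any exception thrown inside a task, so failures are not lost.
        for (Future<?> future : futures) {
            future.get();
        }
        executor.shutdown();
    }
}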

Example 45 with ExecutorService

Use of java.util.concurrent.ExecutorService in project pinot by linkedin.

The class UploadRefreshDeleteIntegrationTest, method generateAndUploadRandomSegment:

protected void generateAndUploadRandomSegment(String segmentName, int rowCount) throws Exception {
    ThreadLocalRandom random = ThreadLocalRandom.current();
    Schema schema = new Schema.Parser().parse(new File(TestUtils.getFileFromResourceUrl(getClass().getClassLoader().getResource("dummy.avsc"))));
    GenericRecord record = new GenericData.Record(schema);
    GenericDatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<GenericRecord>(schema);
    DataFileWriter<GenericRecord> fileWriter = new DataFileWriter<GenericRecord>(datumWriter);
    File avroFile = new File(_tmpDir, segmentName + ".avro");
    fileWriter.create(schema, avroFile);
    for (int i = 0; i < rowCount; i++) {
        record.put(0, random.nextInt());
        fileWriter.append(record);
    }
    fileWriter.close();
    int segmentIndex = Integer.parseInt(segmentName.split("_")[1]);
    File segmentTarDir = new File(_tarsDir, segmentName);
    ensureDirectoryExistsAndIsEmpty(segmentTarDir);
    ExecutorService executor = MoreExecutors.sameThreadExecutor();
    buildSegmentsFromAvro(Collections.singletonList(avroFile), executor, segmentIndex, new File(_segmentsDir, segmentName), segmentTarDir, this.tableName, false, null);
    executor.shutdown();
    executor.awaitTermination(1L, TimeUnit.MINUTES);
    for (String segmentFileName : segmentTarDir.list()) {
        File file = new File(segmentTarDir, segmentFileName);
        FileUploadUtils.sendSegmentFile("localhost", "8998", segmentFileName, file, file.length());
    }
    avroFile.delete();
    FileUtils.deleteQuietly(segmentTarDir);
}
Also used: Schema(org.apache.avro.Schema) DataFileWriter(org.apache.avro.file.DataFileWriter) GenericDatumWriter(org.apache.avro.generic.GenericDatumWriter) ExecutorService(java.util.concurrent.ExecutorService) ThreadLocalRandom(java.util.concurrent.ThreadLocalRandom) GenericRecord(org.apache.avro.generic.GenericRecord) File(java.io.File)
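
MoreExecutors.sameThreadExecutor() is Guava's direct executor: each submitted task runs inline on the calling thread, so the segment build here is effectively synchronous, and the shutdown()/awaitTermination() calls return immediately. Guava has since deprecated sameThreadExecutor() in favor of MoreExecutors.newDirectExecutorService(); a sketch of the equivalent behavior (requires Guava on the classpath):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.MoreExecutors;

public class DirectExecutorSketch {
    public static void main(String[] args) throws InterruptedException {
        // Runs each submitted task inline on the caller's thread; nothing is queued.
        ExecutorService executor = MoreExecutors.newDirectExecutorService();
        executor.submit(() -> System.out.println("runs on " + Thread.currentThread().getName()));
        executor.shutdown();
        executor.awaitTermination(1L, TimeUnit.MINUTES); // returns immediately
    }
}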

Aggregations

ExecutorService (java.util.concurrent.ExecutorService): 4721 usages
Test (org.junit.Test): 1645 usages
ArrayList (java.util.ArrayList): 1157 usages
Future (java.util.concurrent.Future): 1064 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 722 usages
IOException (java.io.IOException): 699 usages
Callable (java.util.concurrent.Callable): 612 usages
ExecutionException (java.util.concurrent.ExecutionException): 532 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 343 usages
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 296 usages
HashMap (java.util.HashMap): 257 usages
List (java.util.List): 257 usages
Test (org.testng.annotations.Test): 240 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 235 usages
File (java.io.File): 210 usages
TimeoutException (java.util.concurrent.TimeoutException): 207 usages
Map (java.util.Map): 201 usages
HashSet (java.util.HashSet): 171 usages
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 169 usages
Test (org.junit.jupiter.api.Test): 163 usages