Example 56 with LongWritable

use of org.apache.hadoop.io.LongWritable in project jena by apache.

the class AbstractWholeFileNodeTupleReader method nextKeyValue.

@Override
public boolean nextKeyValue() throws IOException {
    // Reuse key for efficiency
    if (key == null) {
        key = new LongWritable();
    }
    if (this.finished)
        return false;
    try {
        if (this.iter.hasNext()) {
            Long l = this.stream.getPosition();
            if (l != null) {
                this.key.set(l);
                // For compressed input the reported length can lag the actual
                // position, so keep it one byte ahead of where we are to avoid
                // reporting 100% progress before we have actually finished
                if (this.compressionCodecs != null && l > this.length)
                    this.length = l + 1;
            }
            this.tuple = this.createInstance(this.iter.next());
            return true;
        } else {
            // Need to ensure that the parser thread has finished in order
            // to determine whether we finished without error
            this.waitForParserFinished();
            if (this.parserError != null) {
                LOG.error("Error parsing whole file, aborting further parsing", this.parserError);
                if (!this.ignoreBadTuples)
                    throw new IOException("Error parsing whole file at position " + this.input.getBytesRead() + ", aborting further parsing", this.parserError);
            }
            this.key = null;
            this.tuple = null;
            this.finished = true;
            // For compressed input we deliberately over-reported the length by
            // one byte above, so correct it now that we have reached the end of
            // the stream
            if (this.compressionCodecs != null)
                this.length--;
            return false;
        }
    } catch (Throwable e) {
        // Failed to read the tuple on this line
        LOG.error("Error parsing whole file, aborting further parsing", e);
        if (!this.ignoreBadTuples) {
            this.iter.close();
            throw new IOException("Error parsing whole file at position " + this.input.getBytesRead() + ", aborting further parsing", e);
        }
        this.key = null;
        this.tuple = null;
        this.finished = true;
        return false;
    }
}
Also used : LongWritable(org.apache.hadoop.io.LongWritable) IOException(java.io.IOException)

Example 57 with LongWritable

use of org.apache.hadoop.io.LongWritable in project hadoop by apache.

the class TestIPC method testIpcConnectTimeout.

@Test(timeout = 60000)
public void testIpcConnectTimeout() throws IOException {
    // create server
    Server server = new TestServer(1, true);
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    // Intentionally do not start the server, so the connect attempt times out
    // start client
    Client.setConnectTimeout(conf, 100);
    Client client = new Client(LongWritable.class, conf);
    // set the rpc timeout to twice the MIN_SLEEP_TIME
    try {
        call(client, new LongWritable(RANDOM.nextLong()), addr, MIN_SLEEP_TIME * 2, conf);
        fail("Expected an exception to have been thrown");
    } catch (SocketTimeoutException e) {
        LOG.info("Got a SocketTimeoutException ", e);
    }
    client.stop();
}
Also used : SocketTimeoutException(java.net.SocketTimeoutException) InetSocketAddress(java.net.InetSocketAddress) LongWritable(org.apache.hadoop.io.LongWritable) Test(org.junit.Test)

Example 58 with LongWritable

use of org.apache.hadoop.io.LongWritable in project hadoop by apache.

the class TestIPC method testIpcTimeout.

@Test(timeout = 60000)
public void testIpcTimeout() throws IOException {
    // start server
    Server server = new TestServer(1, true);
    InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    // start client
    Client client = new Client(LongWritable.class, conf);
    // set timeout to be less than MIN_SLEEP_TIME
    try {
        call(client, new LongWritable(RANDOM.nextLong()), addr, MIN_SLEEP_TIME / 2, conf);
        fail("Expected an exception to have been thrown");
    } catch (SocketTimeoutException e) {
        LOG.info("Got a SocketTimeoutException ", e);
    }
    // set timeout to be bigger than 3*ping interval
    call(client, new LongWritable(RANDOM.nextLong()), addr, 3 * PING_INTERVAL + MIN_SLEEP_TIME, conf);
    client.stop();
}
Also used : SocketTimeoutException(java.net.SocketTimeoutException) InetSocketAddress(java.net.InetSocketAddress) LongWritable(org.apache.hadoop.io.LongWritable) Test(org.junit.Test)

Example 59 with LongWritable

use of org.apache.hadoop.io.LongWritable in project hadoop by apache.

the class TestIPC method checkBlocking.

// goal is to jam a handler with a connection, fill the callq with
// connections, in turn jamming the readers - then flood the server and
// ensure that the listener blocks when the reader connection queues fill
@SuppressWarnings("unchecked")
private void checkBlocking(int readers, int readerQ, int callQ) throws Exception {
    // a single handler makes the blocking behavior easier to reason about
    int handlers = 1;
    final Configuration conf = new Configuration();
    conf.setInt(CommonConfigurationKeys.IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_KEY, readerQ);
    // send in enough clients to block up the handlers, callq, and readers
    final int initialClients = readers + callQ + handlers;
    // max connections we should ever end up accepting at once
    // (the +1 accounts for the listener)
    final int maxAccept = initialClients + readers * readerQ + 1;
    // stress it with 2X the max
    int clients = maxAccept * 2;
    final AtomicInteger failures = new AtomicInteger(0);
    final CountDownLatch callFinishedLatch = new CountDownLatch(clients);
    // start server
    final TestServerQueue server = new TestServerQueue(clients, readers, callQ, handlers, conf);
    CallQueueManager<Call> spy = spy((CallQueueManager<Call>) Whitebox.getInternalState(server, "callQueue"));
    Whitebox.setInternalState(server, "callQueue", spy);
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    server.start();
    Client.setConnectTimeout(conf, 10000);
    // instantiate the threads, will start in batches
    Thread[] threads = new Thread[clients];
    for (int i = 0; i < clients; i++) {
        threads[i] = new Thread(new Runnable() {

            @Override
            public void run() {
                Client client = new Client(LongWritable.class, conf);
                try {
                    call(client, new LongWritable(Thread.currentThread().getId()), addr, 60000, conf);
                } catch (Throwable e) {
                    LOG.error(e);
                    failures.incrementAndGet();
                    return;
                } finally {
                    callFinishedLatch.countDown();
                    client.stop();
                }
            }
        });
    }
    // start the initial clients one at a time so the readers end up blocked
    // and the others are not racing to fill the callq
    for (int i = 0; i < initialClients; i++) {
        threads[i].start();
        if (i == 0) {
            // let first reader block in a call
            server.firstCallLatch.await();
        }
        // wait until reader put a call to callQueue, to make sure all readers
        // are blocking on the queue after initialClients threads are started.
        verify(spy, timeout(100).times(i + 1)).put(Mockito.<Call>anyObject());
    }
    try {
        // wait till everything is slotted, should happen immediately
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                return server.getNumOpenConnections() >= initialClients;
            }
        }, 100, 3000);
    } catch (TimeoutException e) {
        fail("timed out while waiting for connections to open.");
    }
    LOG.info("(initial clients) need:" + initialClients + " connections have:" + server.getNumOpenConnections());
    LOG.info("ipc layer should be blocked");
    assertEquals(callQ, server.getCallQueueLen());
    assertEquals(initialClients, server.getNumOpenConnections());
    // connection queues should fill and then the listener should block
    for (int i = initialClients; i < clients; i++) {
        threads[i].start();
    }
    Thread.sleep(10);
    try {
        GenericTestUtils.waitFor(new Supplier<Boolean>() {

            @Override
            public Boolean get() {
                return server.getNumOpenConnections() >= maxAccept;
            }
        }, 100, 3000);
    } catch (TimeoutException e) {
        fail("timed out while waiting for connections to open until maxAccept.");
    }
    LOG.info("(max clients) need:" + maxAccept + " connections have:" + server.getNumOpenConnections());
    // check a few times to make sure we didn't go over
    for (int i = 0; i < 4; i++) {
        assertEquals(maxAccept, server.getNumOpenConnections());
        Thread.sleep(100);
    }
    // sanity check that no calls have finished
    assertEquals(clients, callFinishedLatch.getCount());
    LOG.info("releasing the calls");
    server.callBlockLatch.countDown();
    callFinishedLatch.await();
    for (Thread t : threads) {
        t.join();
    }
    assertEquals(0, failures.get());
    server.stop();
}
Also used : Call(org.apache.hadoop.ipc.Server.Call) Configuration(org.apache.hadoop.conf.Configuration) InetSocketAddress(java.net.InetSocketAddress) CountDownLatch(java.util.concurrent.CountDownLatch) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) LongWritable(org.apache.hadoop.io.LongWritable) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) TimeoutException(java.util.concurrent.TimeoutException) ConnectTimeoutException(org.apache.hadoop.net.ConnectTimeoutException) SocketTimeoutException(java.net.SocketTimeoutException)

Example 60 with LongWritable

use of org.apache.hadoop.io.LongWritable in project hadoop by apache.

the class TestIPC method call.

static Writable call(Client client, InetSocketAddress addr, int serviceClass, Configuration conf) throws IOException {
    final LongWritable param = new LongWritable(RANDOM.nextLong());
    final ConnectionId remoteId = getConnectionId(addr, MIN_SLEEP_TIME, conf);
    return client.call(RPC.RpcKind.RPC_BUILTIN, param, remoteId, serviceClass, null);
}
Also used : ConnectionId(org.apache.hadoop.ipc.Client.ConnectionId) LongWritable(org.apache.hadoop.io.LongWritable)

Aggregations

LongWritable (org.apache.hadoop.io.LongWritable) 445
Text (org.apache.hadoop.io.Text) 220
Test (org.junit.Test) 171
IntWritable (org.apache.hadoop.io.IntWritable) 102
Path (org.apache.hadoop.fs.Path) 99
BytesWritable (org.apache.hadoop.io.BytesWritable) 70
FloatWritable (org.apache.hadoop.io.FloatWritable) 68
Configuration (org.apache.hadoop.conf.Configuration) 62
DoubleWritable (org.apache.hadoop.hive.serde2.io.DoubleWritable) 62
BooleanWritable (org.apache.hadoop.io.BooleanWritable) 60
ArrayList (java.util.ArrayList) 59
ObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector) 57
ShortWritable (org.apache.hadoop.hive.serde2.io.ShortWritable) 53
IOException (java.io.IOException) 49
ByteWritable (org.apache.hadoop.hive.serde2.io.ByteWritable) 48
SequenceFile (org.apache.hadoop.io.SequenceFile) 42
HiveDecimalWritable (org.apache.hadoop.hive.serde2.io.HiveDecimalWritable) 40
FileSystem (org.apache.hadoop.fs.FileSystem) 37
JobConf (org.apache.hadoop.mapred.JobConf) 37
DeferredObject (org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredObject) 35