Search in sources:

Example 1 with Header

use of org.apache.commons.httpclient.Header in project camel by apache.

the class HttpProducer method extractResponseBody.

/**
     * Extracts the response from the method as an InputStream.
     *
     * @param method the method that was executed
     * @param exchange the exchange
     * @param ignoreResponseBody if true, Camel neither reads the response body nor caches the input stream
     * @return the response either as a stream, or as a deserialized Java object
     * @throws IOException can be thrown
     * @throws ClassNotFoundException if a serialized Java object class cannot be resolved
     */
protected Object extractResponseBody(HttpMethod method, Exchange exchange, boolean ignoreResponseBody) throws IOException, ClassNotFoundException {
    InputStream is = method.getResponseBodyAsStream();
    if (is == null) {
        return null;
    }
    Header header = method.getResponseHeader(Exchange.CONTENT_ENCODING);
    String contentEncoding = header != null ? header.getValue() : null;
    if (!exchange.getProperty(Exchange.SKIP_GZIP_ENCODING, Boolean.FALSE, Boolean.class)) {
        is = GZIPHelper.uncompressGzip(contentEncoding, is);
    }
    // Honor the character encoding
    String contentType = null;
    header = method.getResponseHeader("content-type");
    if (header != null) {
        contentType = header.getValue();
        // find the charset and set it to the Exchange
        HttpHelper.setCharsetFromContentType(contentType, exchange);
    }
    // if content type is a serialized java object then de-serialize it back to a Java object
    if (contentType != null && contentType.equals(HttpConstants.CONTENT_TYPE_JAVA_SERIALIZED_OBJECT)) {
        // only deserialize java if allowed
        if (getEndpoint().getComponent().isAllowJavaSerializedObject() || getEndpoint().isTransferException()) {
            return HttpHelper.deserializeJavaObjectFromStream(is, exchange.getContext());
        } else {
            // empty response
            return null;
        }
    } else {
        if (!getEndpoint().isDisableStreamCache()) {
            // wrap the response in a stream cache so it's re-readable
            InputStream response = null;
            if (!ignoreResponseBody) {
                response = doExtractResponseBodyAsStream(is, exchange);
            }
            return response;
        } else {
            // use the response stream as-is
            return is;
        }
    }
}
Also used: Header (org.apache.commons.httpclient.Header), InputStream (java.io.InputStream)
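
A minimal, self-contained sketch of the header-lookup pattern this example depends on: in commons-httpclient 3.x, getResponseHeader returns null when the header is absent, so every lookup needs a null guard before calling getValue(). The class name and URL below are placeholders, not part of the original code.

import java.io.InputStream;

import org.apache.commons.httpclient.Header;
import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.methods.GetMethod;

public class HeaderLookupSketch {
    public static void main(String[] args) throws Exception {
        HttpClient client = new HttpClient();
        // placeholder URL; any reachable HTTP endpoint works
        GetMethod method = new GetMethod("http://example.org/");
        try {
            client.executeMethod(method);
            // getResponseHeader returns null when the header is absent,
            // hence the null guard before getValue()
            Header contentType = method.getResponseHeader("Content-Type");
            String value = contentType != null ? contentType.getValue() : null;
            System.out.println("Content-Type: " + value);
            InputStream is = method.getResponseBodyAsStream();
            if (is != null) {
                is.close();
            }
        } finally {
            method.releaseConnection();
        }
    }
}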

Example 2 with Header

use of org.apache.commons.httpclient.Header in project hadoop by apache.

the class TestSwiftFileSystemPartitionedUploads method testFilePartUploadNoLengthCheck.

/**
   * Tests upload functionality for big files (> 5 GB).
   */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testFilePartUploadNoLengthCheck() throws IOException, URISyntaxException {
    final Path path = new Path("/test/testFilePartUploadLengthCheck");
    int len = 8192;
    final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
    FSDataOutputStream out = fs.create(path, false, getBufferSize(), (short) 1, BLOCK_SIZE);
    try {
        int totalPartitionsToWrite = len / PART_SIZE_BYTES;
        assertPartitionsWritten("Startup", out, 0);
        //write 2048
        int firstWriteLen = 2048;
        out.write(src, 0, firstWriteLen);
        //assert
        long expected = getExpectedPartitionsWritten(firstWriteLen, PART_SIZE_BYTES, false);
        SwiftUtils.debug(LOG, "First write: predict %d partitions written", expected);
        assertPartitionsWritten("First write completed", out, expected);
        //write the rest
        int remainder = len - firstWriteLen;
        SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);
        out.write(src, firstWriteLen, remainder);
        expected = getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
        assertPartitionsWritten("Remaining data", out, expected);
        out.close();
        expected = getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
        assertPartitionsWritten("Stream closed", out, expected);
        Header[] headers = fs.getStore().getObjectHeaders(path, true);
        for (Header header : headers) {
            LOG.info(header.toString());
        }
        byte[] dest = readDataset(fs, path, len);
        LOG.info("Read dataset from " + path + ": data length =" + len);
        //compare data
        SwiftTestUtils.compareByteArrays(src, dest, len);
        FileStatus status = fs.getFileStatus(path);
        //now see what block location info comes back.
        //This will vary depending on the Swift version, so the results
    //aren't checked; merely that the test actually worked
        BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
        assertNotNull("Null getFileBlockLocations()", locations);
        assertTrue("empty array returned for getFileBlockLocations()", locations.length > 0);
    } finally {
        IOUtils.closeStream(out);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), Header (org.apache.commons.httpclient.Header), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), BlockLocation (org.apache.hadoop.fs.BlockLocation), Test (org.junit.Test)
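
The test's assertions hinge on getExpectedPartitionsWritten, whose implementation is not shown above. A hypothetical reconstruction of that arithmetic, under the assumption that an open stream has only flushed complete partitions and that closing flushes any trailing partial partition; the class name and the 1024-byte part size in main are illustrative only:

public class PartitionMathSketch {
    // Hypothetical reconstruction of the helper the test calls;
    // the real one lives in the Hadoop Swift test suite.
    static long expectedPartitions(long bytesWritten, int partSizeBytes, boolean closed) {
        long fullPartitions = bytesWritten / partSizeBytes;
        long remainder = bytesWritten % partSizeBytes;
        // an open stream has only flushed complete partitions;
        // closing flushes any trailing partial partition as one more part
        return (closed && remainder > 0) ? fullPartitions + 1 : fullPartitions;
    }

    public static void main(String[] args) {
        int partSize = 1024; // hypothetical PART_SIZE_BYTES
        System.out.println(expectedPartitions(2048, partSize, false)); // 2
        System.out.println(expectedPartitions(8192, partSize, true));  // 8
    }
}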

Example 3 with Header

use of org.apache.commons.httpclient.Header in project hadoop by apache.

the class TestSwiftFileSystemPartitionedUploads method testFilePartUpload.

/**
   * Tests upload functionality for big files (> 5 GB).
   */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testFilePartUpload() throws Throwable {
    final Path path = new Path("/test/testFilePartUpload");
    int len = 8192;
    final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
    FSDataOutputStream out = fs.create(path, false, getBufferSize(), (short) 1, BLOCK_SIZE);
    try {
        int totalPartitionsToWrite = len / PART_SIZE_BYTES;
        assertPartitionsWritten("Startup", out, 0);
        //write 2048
        int firstWriteLen = 2048;
        out.write(src, 0, firstWriteLen);
        //assert
        long expected = getExpectedPartitionsWritten(firstWriteLen, PART_SIZE_BYTES, false);
        SwiftUtils.debug(LOG, "First write: predict %d partitions written", expected);
        assertPartitionsWritten("First write completed", out, expected);
        //write the rest
        int remainder = len - firstWriteLen;
        SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);
        out.write(src, firstWriteLen, remainder);
        expected = getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
        assertPartitionsWritten("Remaining data", out, expected);
        out.close();
        expected = getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
        assertPartitionsWritten("Stream closed", out, expected);
        Header[] headers = fs.getStore().getObjectHeaders(path, true);
        for (Header header : headers) {
            LOG.info(header.toString());
        }
        byte[] dest = readDataset(fs, path, len);
        LOG.info("Read dataset from " + path + ": data length =" + len);
        //compare data
        SwiftTestUtils.compareByteArrays(src, dest, len);
        FileStatus status;
        final Path qualifiedPath = path.makeQualified(fs);
        status = fs.getFileStatus(qualifiedPath);
        //now see what block location info comes back.
        //This will vary depending on the Swift version, so the results
    //aren't checked; merely that the test actually worked
        BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
        assertNotNull("Null getFileBlockLocations()", locations);
        assertTrue("empty array returned for getFileBlockLocations()", locations.length > 0);
    //validate the path length; failures are downgraded to a skip below
        try {
            validatePathLen(path, len);
        } catch (AssertionError e) {
            //downgrade to a skip
            throw new AssumptionViolatedException(e, null);
        }
    } finally {
        IOUtils.closeStream(out);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), Header (org.apache.commons.httpclient.Header), AssumptionViolatedException (org.junit.internal.AssumptionViolatedException), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), BlockLocation (org.apache.hadoop.fs.BlockLocation), Test (org.junit.Test)
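
The catch block at the end of this example shows a useful JUnit 4 idiom: converting an environment-dependent AssertionError into an AssumptionViolatedException so the test is reported as skipped rather than failed. A standalone sketch of the same pattern; the class name and the placeholder check are ours, not from the original:

import org.junit.Assert;
import org.junit.Test;
import org.junit.internal.AssumptionViolatedException;

public class SkipOnAssertionSketch {
    @Test
    public void environmentDependentCheck() {
        try {
            // placeholder for a check whose outcome depends on the backend
            Assert.assertTrue("backend reported a stale length", lengthLooksRight());
        } catch (AssertionError e) {
            // same pattern as the example above: downgrade the failure to a skip
            throw new AssumptionViolatedException(e, null);
        }
    }

    private boolean lengthLooksRight() {
        return true; // stand-in for an eventually-consistent length probe
    }
}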

Example 4 with Header

use of org.apache.commons.httpclient.Header in project hadoop by apache.

the class SwiftRestClient method buildException.

/**
   * Build an exception from a failed operation. This can include generating
   * specific exceptions (e.g. FileNotFound), as well as the default
   * {@link SwiftInvalidResponseException}.
   *
   * @param uri URI for operation
   * @param method operation that failed
   * @param statusCode status code
   * @param <M> method type
   * @return an exception to throw
   */
private <M extends HttpMethod> IOException buildException(URI uri, M method, int statusCode) {
    IOException fault;
    //log the failure at debug level
    String errorMessage = String.format("Method %s on %s failed, status code: %d," + " status line: %s", method.getName(), uri, statusCode, method.getStatusLine());
    if (LOG.isDebugEnabled()) {
        LOG.debug(errorMessage);
    }
    //map the status code to the most specific exception available
    switch(statusCode) {
        case SC_NOT_FOUND:
            fault = new FileNotFoundException("Operation " + method.getName() + " on " + uri);
            break;
        case SC_BAD_REQUEST:
            //bad HTTP request
            fault = new SwiftBadRequestException("Bad request against " + uri, method.getName(), uri, method);
            break;
        case SC_REQUESTED_RANGE_NOT_SATISFIABLE:
            //out of range
            StringBuilder errorText = new StringBuilder(method.getStatusText());
            //get the requested length
            Header requestContentLen = method.getRequestHeader(HEADER_CONTENT_LENGTH);
            if (requestContentLen != null) {
                errorText.append(" requested ").append(requestContentLen.getValue());
            }
            //and the result
            Header availableContentRange = method.getResponseHeader(HEADER_CONTENT_RANGE);
            if (availableContentRange != null) {
                errorText.append(" available ").append(availableContentRange.getValue());
            }
            fault = new EOFException(errorText.toString());
            break;
        case SC_UNAUTHORIZED:
            //auth failure; should only happen on the second attempt
            fault = new SwiftAuthenticationFailedException("Operation not authorized- current access token =" + getToken(), method.getName(), uri, method);
            break;
        case SwiftProtocolConstants.SC_TOO_MANY_REQUESTS_429:
        case SwiftProtocolConstants.SC_THROTTLED_498:
            //response code that may mean the client is being throttled
            fault = new SwiftThrottledRequestException("Client is being throttled: too many requests", method.getName(), uri, method);
            break;
        default:
            //return a generic invalid HTTP response
            fault = new SwiftInvalidResponseException(errorMessage, method.getName(), uri, method);
    }
    return fault;
}
Also used: Header (org.apache.commons.httpclient.Header), FileNotFoundException (java.io.FileNotFoundException), EOFException (java.io.EOFException), IOException (java.io.IOException), SwiftAuthenticationFailedException (org.apache.hadoop.fs.swift.exceptions.SwiftAuthenticationFailedException), SwiftInvalidResponseException (org.apache.hadoop.fs.swift.exceptions.SwiftInvalidResponseException), SwiftThrottledRequestException (org.apache.hadoop.fs.swift.exceptions.SwiftThrottledRequestException), SwiftBadRequestException (org.apache.hadoop.fs.swift.exceptions.SwiftBadRequestException)
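
The core idea in buildException, mapping HTTP status codes to the most specific exception type available, works with plain JDK types too. A simplified sketch with the Swift-specific exception classes swapped for standard ones; the class and method names here are ours:

import java.io.EOFException;
import java.io.FileNotFoundException;
import java.io.IOException;

public class StatusMapperSketch {
    static IOException toException(String operation, String uri, int statusCode) {
        switch (statusCode) {
            case 404:
                // a missing resource maps naturally to FileNotFoundException
                return new FileNotFoundException("Operation " + operation + " on " + uri);
            case 416:
                // requested range not satisfiable: read past the end of the object
                return new EOFException("Range not satisfiable for " + uri);
            default:
                // generic fallback, mirroring SwiftInvalidResponseException above
                return new IOException("Operation " + operation + " on " + uri
                        + " failed with status " + statusCode);
        }
    }

    public static void main(String[] args) {
        System.out.println(toException("GET", "swift://container/object", 404));
    }
}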

Example 5 with Header

use of org.apache.commons.httpclient.Header in project hadoop by apache.

the class SwiftNativeFileSystemStore method getObjectMetadata.

/**
   * Get the metadata of an object.
   *
   * @param path path
   * @param newest flag to say "set the newest header", otherwise take any entry
   * @return file metadata, or null if no headers were received back from the server
   * @throws IOException on a problem
   * @throws FileNotFoundException if there is nothing at the path
   */
public SwiftFileStatus getObjectMetadata(Path path, boolean newest) throws IOException, FileNotFoundException {
    SwiftObjectPath objectPath = toObjectPath(path);
    final Header[] headers = stat(objectPath, newest);
    //no headers is treated as a missing file
    if (headers.length == 0) {
        throw new FileNotFoundException("Not Found " + path.toUri());
    }
    boolean isDir = false;
    long length = 0;
    long lastModified = 0;
    for (Header header : headers) {
        String headerName = header.getName();
        if (headerName.equals(SwiftProtocolConstants.X_CONTAINER_OBJECT_COUNT) || headerName.equals(SwiftProtocolConstants.X_CONTAINER_BYTES_USED)) {
            length = 0;
            isDir = true;
        }
        if (SwiftProtocolConstants.HEADER_CONTENT_LENGTH.equals(headerName)) {
            length = Long.parseLong(header.getValue());
        }
        if (SwiftProtocolConstants.HEADER_LAST_MODIFIED.equals(headerName)) {
            final SimpleDateFormat simpleDateFormat = new SimpleDateFormat(PATTERN);
            try {
                lastModified = simpleDateFormat.parse(header.getValue()).getTime();
            } catch (ParseException e) {
                throw new SwiftException("Failed to parse " + header.toString(), e);
            }
        }
    }
    if (lastModified == 0) {
        lastModified = System.currentTimeMillis();
    }
    Path correctSwiftPath = getCorrectSwiftPath(path);
    return new SwiftFileStatus(length, isDir, 1, getBlocksize(), lastModified, correctSwiftPath);
}
Also used: Path (org.apache.hadoop.fs.Path), SwiftObjectPath (org.apache.hadoop.fs.swift.util.SwiftObjectPath), Header (org.apache.commons.httpclient.Header), FileNotFoundException (java.io.FileNotFoundException), SwiftException (org.apache.hadoop.fs.swift.exceptions.SwiftException), ParseException (java.text.ParseException), SimpleDateFormat (java.text.SimpleDateFormat)
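
The PATTERN constant used to parse the Last-Modified header is not shown in the snippet; HTTP dates are conventionally RFC 1123-formatted, so the sketch below assumes that format. Note the explicit Locale, which SimpleDateFormat needs to parse English month and day names reliably; the class name and sample date are placeholders.

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Locale;

public class LastModifiedSketch {
    // assumed RFC 1123-style pattern; the real PATTERN constant may differ
    private static final String PATTERN = "EEE, dd MMM yyyy HH:mm:ss zzz";

    public static void main(String[] args) throws ParseException {
        SimpleDateFormat fmt = new SimpleDateFormat(PATTERN, Locale.US);
        // parse a typical HTTP date into epoch milliseconds, as the
        // example above does for the Last-Modified response header
        long lastModified = fmt.parse("Tue, 15 Nov 1994 12:45:26 GMT").getTime();
        System.out.println("epoch millis: " + lastModified);
    }
}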

Aggregations

Header (org.apache.commons.httpclient.Header): 88
GetMethod (org.apache.commons.httpclient.methods.GetMethod): 22
Test (org.junit.Test): 22
IOException (java.io.IOException): 20
HttpClient (org.apache.commons.httpclient.HttpClient): 19
PostMethod (org.apache.commons.httpclient.methods.PostMethod): 19
HttpMethod (org.apache.commons.httpclient.HttpMethod): 17
NameValuePair (org.apache.commons.httpclient.NameValuePair): 16
ArrayList (java.util.ArrayList): 15
InputStream (java.io.InputStream): 13
HttpException (org.apache.commons.httpclient.HttpException): 13
PutMethod (org.apache.commons.httpclient.methods.PutMethod): 10
Account (com.zimbra.cs.account.Account): 9
ServiceException (com.zimbra.common.service.ServiceException): 6
UsernamePasswordCredentials (org.apache.commons.httpclient.UsernamePasswordCredentials): 6
HttpTest (org.apache.sling.commons.testing.integration.HttpTest): 6
Pair (com.zimbra.common.util.Pair): 5
ByteArrayInputStream (java.io.ByteArrayInputStream): 5
HashMap (java.util.HashMap): 5
DefaultHttpMethodRetryHandler (org.apache.commons.httpclient.DefaultHttpMethodRetryHandler): 5