Run this command to install the SDK with npm:

npm install cloudmersive-ocr-api-client --save


Or add this snippet to your package.json:

  "dependencies": {
    "cloudmersive-ocr-api-client": "^1.3.3"
  }
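
That dependencies entry belongs inside your project's package.json; a minimal file might look like the following (the name and version fields are placeholders of your own choosing):

{
  "name": "ocr-example",
  "version": "1.0.0",
  "dependencies": {
    "cloudmersive-ocr-api-client": "^1.3.3"
  }
}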


var CloudmersiveOcrApiClient = require('cloudmersive-ocr-api-client');
var fs = require('fs');
var defaultClient = CloudmersiveOcrApiClient.ApiClient.instance;

// Configure API key authorization: Apikey
var Apikey = defaultClient.authentications['Apikey'];
Apikey.apiKey = 'YOUR API KEY';



var apiInstance = new CloudmersiveOcrApiClient.ImageOcrApi();

var imageFile = fs.readFileSync("C:\\temp\\inputfile"); // File | Image file to perform OCR on.  Common file formats such as PNG, JPEG are supported.

var opts = { 
  'bucketID': "bucketID_example", // String | Bucket ID of the Configuration Bucket storing the form templates
  'bucketSecretKey': "bucketSecretKey_example", // String | Bucket Secret Key of the Configuration Bucket storing the form templates
  'recognitionMode': "recognitionMode_example", // String | Optional, enable advanced recognition mode by specifying 'Advanced', enable handwriting recognition by specifying 'EnableHandwriting'.  Default is disabled.
  'preprocessing': "preprocessing_example", // String | Optional, preprocessing mode, default is 'Auto'.  Possible values are None (no preprocessing of the image), and Auto (automatic image enhancement of the image - including automatic unrotation of the image - before OCR is applied; this is recommended).  Set this to 'None' if you do not want to use automatic image unrotation and enhancement.
  'diagnostics': "diagnostics_example" // String | Optional, diagnostics mode, default is 'false'.  Possible values are 'true' (will set DiagnosticImage to a diagnostic PNG image in the result), and 'false' (no diagnostics are enabled; this is recommended for best performance).
};

var callback = function(error, data, response) {
  if (error) {
    console.error(error);
  } else {
    console.log('API called successfully. Returned data: ' + data);
  }
};
apiInstance.imageOcrPhotoRecognizeFormAdvanced(imageFile, opts, callback);

Run this command to install the SDK with pip:

pip install cloudmersive-ocr-api-client


from __future__ import print_function
import time
import cloudmersive_ocr_api_client
from cloudmersive_ocr_api_client.rest import ApiException
from pprint import pprint

# Configure API key authorization: Apikey
configuration = cloudmersive_ocr_api_client.Configuration()
configuration.api_key['Apikey'] = 'YOUR_API_KEY'



# create an instance of the API class
api_instance = cloudmersive_ocr_api_client.ImageOcrApi(cloudmersive_ocr_api_client.ApiClient(configuration))
image_file = '/path/to/file' # file | Image file to perform OCR on.  Common file formats such as PNG, JPEG are supported.
bucket_id = 'bucket_id_example' # str | Bucket ID of the Configuration Bucket storing the form templates (optional)
bucket_secret_key = 'bucket_secret_key_example' # str | Bucket Secret Key of the Configuration Bucket storing the form templates (optional)
recognition_mode = 'recognition_mode_example' # str | Optional, enable advanced recognition mode by specifying 'Advanced', enable handwriting recognition by specifying 'EnableHandwriting'.  Default is disabled. (optional)
preprocessing = 'preprocessing_example' # str | Optional, preprocessing mode, default is 'Auto'.  Possible values are None (no preprocessing of the image), and Auto (automatic image enhancement of the image - including automatic unrotation of the image - before OCR is applied; this is recommended).  Set this to 'None' if you do not want to use automatic image unrotation and enhancement. (optional)
diagnostics = 'diagnostics_example' # str | Optional, diagnostics mode, default is 'false'.  Possible values are 'true' (will set DiagnosticImage to a diagnostic PNG image in the result), and 'false' (no diagnostics are enabled; this is recommended for best performance). (optional)

try:
    # Recognize a photo of a form, extract key fields using stored templates
    api_response = api_instance.image_ocr_photo_recognize_form_advanced(image_file, bucket_id=bucket_id, bucket_secret_key=bucket_secret_key, recognition_mode=recognition_mode, preprocessing=preprocessing, diagnostics=diagnostics)
    pprint(api_response)
except ApiException as e:
    print("Exception when calling ImageOcrApi->image_ocr_photo_recognize_form_advanced: %s\n" % e)

Run this command to install the SDK with NuGet (Package Manager Console):

Install-Package Cloudmersive.APIClient.NET.OCR -Version 3.0.5


using System;
using System.Diagnostics;
using Cloudmersive.APIClient.NET.OCR.Api;
using Cloudmersive.APIClient.NET.OCR.Client;
using Cloudmersive.APIClient.NET.OCR.Model;

namespace Example
{
    public class ImageOcrPhotoRecognizeFormAdvancedExample
    {
        public static void Main(string[] args)
        {
            // Configure API key authorization: Apikey
            Configuration.Default.AddApiKey("Apikey", "YOUR_API_KEY");
            
            

            var apiInstance = new ImageOcrApi();
            var imageFile = new System.IO.FileStream("C:\\temp\\inputfile", System.IO.FileMode.Open); // System.IO.Stream | Image file to perform OCR on.  Common file formats such as PNG, JPEG are supported.
            var bucketID = "bucketID_example";  // string | Bucket ID of the Configuration Bucket storing the form templates (optional)
            var bucketSecretKey = "bucketSecretKey_example";  // string | Bucket Secret Key of the Configuration Bucket storing the form templates (optional)
            var recognitionMode = "recognitionMode_example";  // string | Optional, enable advanced recognition mode by specifying 'Advanced', enable handwriting recognition by specifying 'EnableHandwriting'.  Default is disabled. (optional)
            var preprocessing = "preprocessing_example";  // string | Optional, preprocessing mode, default is 'Auto'.  Possible values are None (no preprocessing of the image), and Auto (automatic image enhancement of the image - including automatic unrotation of the image - before OCR is applied; this is recommended).  Set this to 'None' if you do not want to use automatic image unrotation and enhancement. (optional)
            var diagnostics = "diagnostics_example";  // string | Optional, diagnostics mode, default is 'false'.  Possible values are 'true' (will set DiagnosticImage to a diagnostic PNG image in the result), and 'false' (no diagnostics are enabled; this is recommended for best performance). (optional)

            try
            {
                // Recognize a photo of a form, extract key fields using stored templates
                FormRecognitionResult result = apiInstance.ImageOcrPhotoRecognizeFormAdvanced(imageFile, bucketID, bucketSecretKey, recognitionMode, preprocessing, diagnostics);
                Debug.WriteLine(result);
            }
            catch (Exception e)
            {
                Debug.Print("Exception when calling ImageOcrApi.ImageOcrPhotoRecognizeFormAdvanced: " + e.Message );
            }
        }
    }
}

To install with Maven, add a reference to the repository in pom.xml:

<repositories>
    <repository>
        <id>jitpack.io</id>
        <url>https://jitpack.io</url>
    </repository>
</repositories>


And add a reference to the dependency in pom.xml:

<dependencies>
<dependency>
    <groupId>com.github.Cloudmersive</groupId>
    <artifactId>Cloudmersive.APIClient.Java</artifactId>
    <version>v4.25</version>
</dependency>
</dependencies>


To install with Gradle, add it in your root build.gradle at the end of repositories:

allprojects {
	repositories {
		...
		maven { url 'https://jitpack.io' }
	}
}


And add the dependency in build.gradle:

dependencies {
        implementation 'com.github.Cloudmersive:Cloudmersive.APIClient.Java:v4.25'
}


// Import classes:
//import com.cloudmersive.client.invoker.ApiClient;
//import com.cloudmersive.client.invoker.ApiException;
//import com.cloudmersive.client.invoker.Configuration;
//import com.cloudmersive.client.invoker.auth.*;
//import com.cloudmersive.client.ImageOcrApi;

ApiClient defaultClient = Configuration.getDefaultApiClient();

// Configure API key authorization: Apikey
ApiKeyAuth Apikey = (ApiKeyAuth) defaultClient.getAuthentication("Apikey");
Apikey.setApiKey("YOUR API KEY");
// Uncomment the following line to set a prefix for the API key, e.g. "Token" (defaults to null)
//Apikey.setApiKeyPrefix("Token");

ImageOcrApi apiInstance = new ImageOcrApi();
File imageFile = new File("/path/to/file"); // File | Image file to perform OCR on.  Common file formats such as PNG, JPEG are supported.
String bucketID = "bucketID_example"; // String | Bucket ID of the Configuration Bucket storing the form templates
String bucketSecretKey = "bucketSecretKey_example"; // String | Bucket Secret Key of the Configuration Bucket storing the form templates
String recognitionMode = "recognitionMode_example"; // String | Optional, enable advanced recognition mode by specifying 'Advanced', enable handwriting recognition by specifying 'EnableHandwriting'.  Default is disabled.
String preprocessing = "preprocessing_example"; // String | Optional, preprocessing mode, default is 'Auto'.  Possible values are None (no preprocessing of the image), and Auto (automatic image enhancement of the image - including automatic unrotation of the image - before OCR is applied; this is recommended).  Set this to 'None' if you do not want to use automatic image unrotation and enhancement.
String diagnostics = "diagnostics_example"; // String | Optional, diagnostics mode, default is 'false'.  Possible values are 'true' (will set DiagnosticImage to a diagnostic PNG image in the result), and 'false' (no diagnostics are enabled; this is recommended for best performance).
try {
    FormRecognitionResult result = apiInstance.imageOcrPhotoRecognizeFormAdvanced(imageFile, bucketID, bucketSecretKey, recognitionMode, preprocessing, diagnostics);
    System.out.println(result);
} catch (ApiException e) {
    System.err.println("Exception when calling ImageOcrApi#imageOcrPhotoRecognizeFormAdvanced");
    e.printStackTrace();
}

Run this command to install the SDK with Composer:

composer require cloudmersive/cloudmersive_ocr_api_client


<?php
require_once(__DIR__ . '/vendor/autoload.php');

// Configure API key authorization: Apikey
$config = Swagger\Client\Configuration::getDefaultConfiguration()->setApiKey('Apikey', 'YOUR_API_KEY');



$apiInstance = new Swagger\Client\Api\ImageOcrApi(
    new GuzzleHttp\Client(),
    $config
);
$image_file = "/path/to/file"; // \SplFileObject | Image file to perform OCR on.  Common file formats such as PNG, JPEG are supported.
$bucket_id = "bucket_id_example"; // string | Bucket ID of the Configuration Bucket storing the form templates
$bucket_secret_key = "bucket_secret_key_example"; // string | Bucket Secret Key of the Configuration Bucket storing the form templates
$recognition_mode = "recognition_mode_example"; // string | Optional, enable advanced recognition mode by specifying 'Advanced', enable handwriting recognition by specifying 'EnableHandwriting'.  Default is disabled.
$preprocessing = "preprocessing_example"; // string | Optional, preprocessing mode, default is 'Auto'.  Possible values are None (no preprocessing of the image), and Auto (automatic image enhancement of the image - including automatic unrotation of the image - before OCR is applied; this is recommended).  Set this to 'None' if you do not want to use automatic image unrotation and enhancement.
$diagnostics = "diagnostics_example"; // string | Optional, diagnostics mode, default is 'false'.  Possible values are 'true' (will set DiagnosticImage to a diagnostic PNG image in the result), and 'false' (no diagnostics are enabled; this is recommended for best performance).

try {
    $result = $apiInstance->imageOcrPhotoRecognizeFormAdvanced($image_file, $bucket_id, $bucket_secret_key, $recognition_mode, $preprocessing, $diagnostics);
    print_r($result);
} catch (Exception $e) {
    echo 'Exception when calling ImageOcrApi->imageOcrPhotoRecognizeFormAdvanced: ', $e->getMessage(), PHP_EOL;
}
?>

Add the Objective-C client to your Podfile:

pod 'CloudmersiveOCRApiClient', '~> 1.0'


CMDefaultConfiguration *apiConfig = [CMDefaultConfiguration sharedConfig];

// Configure API key authorization: (authentication scheme: Apikey)
[apiConfig setApiKey:@"YOUR_API_KEY" forApiKeyIdentifier:@"Apikey"];




NSURL* imageFile = [NSURL fileURLWithPath:@"/path/to/file"]; // Image file to perform OCR on.  Common file formats such as PNG, JPEG are supported.
NSString* bucketID = @"bucketID_example"; // Bucket ID of the Configuration Bucket storing the form templates (optional)
NSString* bucketSecretKey = @"bucketSecretKey_example"; // Bucket Secret Key of the Configuration Bucket storing the form templates (optional)
NSString* recognitionMode = @"recognitionMode_example"; // Optional, enable advanced recognition mode by specifying 'Advanced', enable handwriting recognition by specifying 'EnableHandwriting'.  Default is disabled. (optional)
NSString* preprocessing = @"preprocessing_example"; // Optional, preprocessing mode, default is 'Auto'.  Possible values are None (no preprocessing of the image), and Auto (automatic image enhancement of the image - including automatic unrotation of the image - before OCR is applied; this is recommended).  Set this to 'None' if you do not want to use automatic image unrotation and enhancement. (optional)
NSString* diagnostics = @"diagnostics_example"; // Optional, diagnostics mode, default is 'false'.  Possible values are 'true' (will set DiagnosticImage to a diagnostic PNG image in the result), and 'false' (no diagnostics are enabled; this is recommended for best performance). (optional)

CMImageOcrApi *apiInstance = [[CMImageOcrApi alloc] init];

// Recognize a photo of a form, extract key fields using stored templates
[apiInstance imageOcrPhotoRecognizeFormAdvancedWithImageFile:imageFile
              bucketID:bucketID
              bucketSecretKey:bucketSecretKey
              recognitionMode:recognitionMode
              preprocessing:preprocessing
              diagnostics:diagnostics
          completionHandler: ^(CMFormRecognitionResult* output, NSError* error) {
                        if (output) {
                            NSLog(@"%@", output);
                        }
                        if (error) {
                            NSLog(@"Error calling CMImageOcrApi->imageOcrPhotoRecognizeFormAdvanced: %@", error);
                        }
                    }];

Add the Ruby client to your Gemfile:

gem 'cloudmersive-ocr-api-client', '~> 2.0.2'


# load the gem
require 'cloudmersive-ocr-api-client'
# setup authorization
CloudmersiveOcrApiClient.configure do |config|
  # Configure API key authorization: Apikey
  config.api_key['Apikey'] = 'YOUR API KEY'
  # Uncomment the following line to set a prefix for the API key, e.g. 'Bearer' (defaults to nil)
  #config.api_key_prefix['Apikey'] = 'Bearer'
end

api_instance = CloudmersiveOcrApiClient::ImageOcrApi.new

image_file = File.new('/path/to/inputfile') # File | Image file to perform OCR on.  Common file formats such as PNG, JPEG are supported.

opts = { 
  bucket_id: 'bucket_id_example', # String | Bucket ID of the Configuration Bucket storing the form templates
  bucket_secret_key: 'bucket_secret_key_example', # String | Bucket Secret Key of the Configuration Bucket storing the form templates
  recognition_mode: 'recognition_mode_example', # String | Optional, enable advanced recognition mode by specifying 'Advanced', enable handwriting recognition by specifying 'EnableHandwriting'.  Default is disabled.
  preprocessing: 'preprocessing_example', # String | Optional, preprocessing mode, default is 'Auto'.  Possible values are None (no preprocessing of the image), and Auto (automatic image enhancement of the image - including automatic unrotation of the image - before OCR is applied; this is recommended).  Set this to 'None' if you do not want to use automatic image unrotation and enhancement.
  diagnostics: 'diagnostics_example' # String | Optional, diagnostics mode, default is 'false'.  Possible values are 'true' (will set DiagnosticImage to a diagnostic PNG image in the result), and 'false' (no diagnostics are enabled; this is recommended for best performance).
}

begin
  #Recognize a photo of a form, extract key fields using stored templates
  result = api_instance.image_ocr_photo_recognize_form_advanced(image_file, opts)
  p result
rescue CloudmersiveOcrApiClient::ApiError => e
  puts "Exception when calling ImageOcrApi->image_ocr_photo_recognize_form_advanced: #{e}"
end

Download and copy the /client folder into your Apex project:

Download Apex Client

SwagImageOcrApi api = new SwagImageOcrApi();
SwagClient client = api.getClient();

// Configure API key authorization: Apikey
ApiKeyAuth Apikey = (ApiKeyAuth) client.getAuthentication('Apikey');
Apikey.setApiKey('YOUR API KEY');

Map<String, Object> params = new Map<String, Object>{
    'imageFile' => Blob.valueOf('Sample text file\nContents'),
    'bucketID' => 'bucketID_example',
    'bucketSecretKey' => 'bucketSecretKey_example',
    'recognitionMode' => 'recognitionMode_example',
    'preprocessing' => 'preprocessing_example',
    'diagnostics' => 'diagnostics_example'
};

try {
    // Recognize a photo of a form and extract key fields using stored templates
    SwagFormRecognitionResult result = api.imageOcrPhotoRecognizeFormAdvanced(params);
    System.debug(result);
} catch (Swagger.ApiException e) {
    // ...handle your exceptions
}

Install libcurl in your C/C++ project (this example targets libcurl/7.75.0):

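/* The optional parameters (bucketID, bucketSecretKey, recognitionMode,
 * preprocessing, diagnostics) are sent as HTTP request headers, and the
 * image is uploaded as a multipart/form-data field named "imageFile". */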
#include <stdio.h>
#include <curl/curl.h>

CURL *curl;
CURLcode res;
curl = curl_easy_init();
if(curl) {
     curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "POST");
     curl_easy_setopt(curl, CURLOPT_URL, "https://api.cloudmersive.com/ocr/photo/recognize/form/advanced");
     curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
     curl_easy_setopt(curl, CURLOPT_DEFAULT_PROTOCOL, "https");
     struct curl_slist *headers = NULL;
     headers = curl_slist_append(headers, "bucketID: <string>");
     headers = curl_slist_append(headers, "bucketSecretKey: <string>");
     headers = curl_slist_append(headers, "recognitionMode: <string>");
     headers = curl_slist_append(headers, "preprocessing: <string>");
     headers = curl_slist_append(headers, "diagnostics: <string>");
     headers = curl_slist_append(headers, "Content-Type: multipart/form-data");
     headers = curl_slist_append(headers, "Apikey: YOUR-API-KEY-HERE");
     curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
     curl_mime *mime;
     curl_mimepart *part;
     mime = curl_mime_init(curl);
     part = curl_mime_addpart(mime);
     curl_mime_name(part, "imageFile");
     curl_mime_filedata(part, "/path/to/file");
     curl_easy_setopt(curl, CURLOPT_MIMEPOST, mime);
     res = curl_easy_perform(curl);
     if(res != CURLE_OK)
          fprintf(stderr, "curl_easy_perform() failed: %s\n", curl_easy_strerror(res));
     curl_mime_free(mime);
     curl_slist_free_all(headers);
}
curl_easy_cleanup(curl);

The same request can be made from the command line with cURL:

curl --location --request POST 'https://api.cloudmersive.com/ocr/photo/recognize/form/advanced' \
--header 'bucketID: <string>' \
--header 'bucketSecretKey: <string>' \
--header 'recognitionMode: <string>' \
--header 'preprocessing: <string>' \
--header 'diagnostics: <string>' \
--header 'Content-Type: multipart/form-data' \
--header 'Apikey: YOUR-API-KEY-HERE' \
--form 'imageFile=@"/path/to/file"'

The same request in Swift, built manually with URLSession:

import Foundation
#if canImport(FoundationNetworking)
import FoundationNetworking
#endif

var semaphore = DispatchSemaphore (value: 0)

let parameters = [
     [
          "key": "imageFile",
          "src": "/path/to/file",
          "type": "file"
     ]] as [[String : Any]]

let boundary = "Boundary-\(UUID().uuidString)"
var body = ""
var error: Error? = nil
for param in parameters {
     if param["disabled"] == nil {
          let paramName = param["key"]!
          body += "--\(boundary)\r\n"
          body += "Content-Disposition:form-data; name=\"\(paramName)\""
          if param["contentType"] != nil {
               body += "\r\nContent-Type: \(param["contentType"] as! String)"
          }
          let paramType = param["type"] as! String
          if paramType == "text" {
               let paramValue = param["value"] as! String
               body += "\r\n\r\n\(paramValue)\r\n"
          } else {
               let paramSrc = param["src"] as! String
               let fileData = try NSData(contentsOfFile:paramSrc, options:[]) as Data
               let fileContent = String(data: fileData, encoding: .utf8)!
               body += "; filename=\"\(paramSrc)\"\r\n"
                 + "Content-Type: \"content-type header\"\r\n\r\n\(fileContent)\r\n"
          }
     }
}
body += "--\(boundary)--\r\n";
let postData = body.data(using: .utf8)

var request = URLRequest(url: URL(string: "https://api.cloudmersive.com/ocr/photo/recognize/form/advanced")!,timeoutInterval: Double.infinity)
request.addValue("<string>", forHTTPHeaderField: "bucketID")
request.addValue("<string>", forHTTPHeaderField: "bucketSecretKey")
request.addValue("<string>", forHTTPHeaderField: "recognitionMode")
request.addValue("<string>", forHTTPHeaderField: "preprocessing")
request.addValue("<string>", forHTTPHeaderField: "diagnostics")
request.addValue("multipart/form-data", forHTTPHeaderField: "Content-Type")
request.addValue("YOUR-API-KEY-HERE", forHTTPHeaderField: "Apikey")
request.addValue("multipart/form-data; boundary=\(boundary)", forHTTPHeaderField: "Content-Type")

request.httpMethod = "POST"
request.httpBody = postData

let task = URLSession.shared.dataTask(with: request) { data, response, error in 
     guard let data = data else {
          print(String(describing: error))
          semaphore.signal()
          return
     }
     print(String(data: data, encoding: .utf8)!)
     semaphore.signal()
}

task.resume()
semaphore.wait()

This code snippet uses the built-in JavaScript XMLHttpRequest (XHR) capability; it assumes fileInput refers to a file input element on the page:

var data = new FormData();
data.append("imageFile", fileInput.files[0], "file");
 
var xhr = new XMLHttpRequest();
xhr.withCredentials = true;

xhr.addEventListener("readystatechange", function() {
     if(this.readyState === 4) {
          console.log(this.responseText);
     }
});

xhr.open("POST", "https://api.cloudmersive.com/ocr/photo/recognize/form/advanced");
xhr.setRequestHeader("bucketID", "<string>");
xhr.setRequestHeader("bucketSecretKey", "<string>");
xhr.setRequestHeader("recognitionMode", "<string>");
xhr.setRequestHeader("preprocessing", "<string>");
xhr.setRequestHeader("diagnostics", "<string>");

xhr.setRequestHeader("Apikey", "YOUR-API-KEY-HERE");

xhr.send(data);

And here is the same request in Go, using the standard library's net/http and mime/multipart packages:

package main

import (
     "fmt"
     "bytes"
     "mime/multipart"
     "os"
     "path/filepath"
     "io"
     "net/http"
     "io/ioutil"
)

func main() {

     url := "https://api.cloudmersive.com/ocr/photo/recognize/form/advanced"
     method := "POST"

     payload := &bytes.Buffer{}
     writer := multipart.NewWriter(payload)
     file, errFile1 := os.Open("/path/to/file")
     if errFile1 != nil {
          fmt.Println(errFile1)
          return
     }
     defer file.Close()
     part1, errFile1 := writer.CreateFormFile("imageFile", filepath.Base("/path/to/file"))
     _, errFile1 = io.Copy(part1, file)
     if errFile1 != nil {
          fmt.Println(errFile1)
          return
     }
     err := writer.Close()
     if err != nil {
          fmt.Println(err)
          return
     }


     client := &http.Client {
     }
     req, err := http.NewRequest(method, url, payload)

     if err != nil {
          fmt.Println(err)
          return
     }
     req.Header.Add("bucketID", "<string>")
     req.Header.Add("bucketSecretKey", "<string>")
     req.Header.Add("recognitionMode", "<string>")
     req.Header.Add("preprocessing", "<string>")
     req.Header.Add("diagnostics", "<string>")
     req.Header.Add("Content-Type", "multipart/form-data")
     req.Header.Add("Apikey", "YOUR-API-KEY-HERE")

     req.Header.Set("Content-Type", writer.FormDataContentType())
     res, err := client.Do(req)
     if err != nil {
          fmt.Println(err)
          return
     }
     defer res.Body.Close()

     body, err := ioutil.ReadAll(res.Body)
     if err != nil {
          fmt.Println(err)
          return
     }
     fmt.Println(string(body))
}

Walkthrough Video