var CloudmersiveSpeechApiClient = require('cloudmersive-speech-api-client');
var fs = require('fs'); // required below to read the input speech file from disk
var defaultClient = CloudmersiveSpeechApiClient.ApiClient.instance;

// Configure API key authorization: Apikey
var Apikey = defaultClient.authentications['Apikey'];
Apikey.apiKey = 'YOUR API KEY';



var apiInstance = new CloudmersiveSpeechApiClient.RecognizeApi();

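// Read the speech file from disk into a Buffer so the client can upload it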
var speechFile = Buffer.from(fs.readFileSync("C:\\temp\\inputfile").buffer); // File | Speech file to perform the operation on.  Common file formats such as WAV, MP3 are supported.


var callback = function(error, data, response) {
  if (error) {
    console.error(error);
  } else {
    console.log('API called successfully. Returned data: ' + data);
  }
};
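// Recognize audio input as text using machine learning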
apiInstance.recognizeFile(speechFile, callback);
<?php
require_once(__DIR__ . '/vendor/autoload.php');

// Configure API key authorization: Apikey
$config = Swagger\Client\Configuration::getDefaultConfiguration()->setApiKey('Apikey', 'YOUR_API_KEY');



$apiInstance = new Swagger\Client\Api\RecognizeApi(
    // GuzzleHttp\Client is used as the HTTP client; a custom client implementing
    // GuzzleHttp\ClientInterface may be passed here instead
    new GuzzleHttp\Client(),
    $config
);
$speech_file = "/path/to/file"; // \SplFileObject | Speech file to perform the operation on.  Common file formats such as WAV, MP3 are supported.

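// Recognize audio input as text using machine learning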
try {
    $result = $apiInstance->recognizeFile($speech_file);
    print_r($result);
} catch (Exception $e) {
    echo 'Exception when calling RecognizeApi->recognizeFile: ', $e->getMessage(), PHP_EOL;
}
?>
CMDefaultConfiguration *apiConfig = [CMDefaultConfiguration sharedConfig];

// Configure API key authorization: (authentication scheme: Apikey)
[apiConfig setApiKey:@"YOUR_API_KEY" forApiKeyIdentifier:@"Apikey"];




NSURL* speechFile = [NSURL fileURLWithPath:@"/path/to/file"]; // Speech file to perform the operation on.  Common file formats such as WAV, MP3 are supported.

CMRecognizeApi *apiInstance = [[CMRecognizeApi alloc] init];

// Recognize audio input as text using machine learning
[apiInstance recognizeFileWithSpeechFile:speechFile
          completionHandler: ^(CMSpeechRecognitionResult* output, NSError* error) {
                        if (output) {
                            NSLog(@"%@", output);
                        }
                        if (error) {
                            NSLog(@"Error calling CMRecognizeApi->recognizeFile: %@", error);
                        }
                    }];
# load the gem
require 'cloudmersive-voice-recognition-api-client'
# setup authorization
CloudmersiveVoiceRecognitionApiClient.configure do |config|
  # Configure API key authorization: Apikey
  config.api_key['Apikey'] = 'YOUR API KEY'
  # Uncomment the following line to set a prefix for the API key, e.g. 'Bearer' (defaults to nil)
  #config.api_key_prefix['Apikey'] = 'Bearer'
end

api_instance = CloudmersiveVoiceRecognitionApiClient::RecognizeApi.new

speech_file = File.new("/path/to/file") # File | Speech file to perform the operation on.  Common file formats such as WAV, MP3 are supported.


begin
  #Recognize audio input as text using machine learning
  result = api_instance.recognize_file(speech_file)
  p result
rescue CloudmersiveVoiceRecognitionApiClient::ApiError => e
  puts "Exception when calling RecognizeApi->recognize_file: #{e}"
end