Reputation: 19
How to build a framework in the Postman tool?
Please help me with this: is there a tool that integrates with Postman and can run tests from it?
Thanks in advance!! asif
Upvotes: 0
Views: 7224
Reputation: 5287
Newman is a companion tool for Postman that runs Postman tests from the command line. It can also be used as a library. We use it for continuous testing on our production servers.
You can develop any framework you want, using Postman as part of that framework.
Here is the test I used to check 519 known good values against the results returned from a server. The data variable is populated when you supply a data file in the Postman collection runner, or when you pass one as a command line option to Newman.
tests["Status code " + responseCode.code] = responseCode.code === 200;
var response = JSON.parse(responseBody);
tests[ 'response.errorsCount == ' + response.errorsCount ] = response.errorsCount === 0 ;
var outputs = response.outputs ;
var outputsLength = outputs.length ;
if( data.hasOwnProperty("outputs") ) {
tests[ 'data.outputs.length == response.outputs.length ' ] = data.outputs.length == response.outputs.length ;
var dataOutputsLength = data.outputs.length ;
for ( index = 0; index < outputsLength ; index++){
var output = outputs[index] ;
var expectedOutput = data.outputs[index] ;
tests[expectedOutput.cell + ': ' + expectedOutput.value + ' == ' + output.cell + ': ' + output.value ] = expectedOutput.cell == output.cell && expectedOutput.value == output.value ;
}
}
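For reference, the data file this test expects is an array of records, one per iteration, each with an outputs array of cell/value pairs. The shape below mirrors what the test reads; the values themselves are made-up placeholders:
[
    {
        "outputs": [
            { "cell": "A1", "value": "42" },
            { "cell": "B1", "value": "97.5" }
        ]
    }
]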
Here is an example of Newman running the same tests:
newman --collection score-card-export-all-respondents.postman_collection --environment melbourne.postman_environment --data score-card-export-all-respondents-data.json --requestTimeout 60000
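Newman 2.x can also be invoked as a library. Here is a minimal sketch of the same run, assuming the legacy Newman.execute entry point and mirroring the options the script below passes to it (the file names are the ones from the command line above):
var fs = require('fs');
var Newman = require('newman');

var collectionJson = JSON.parse(fs.readFileSync('score-card-export-all-respondents.postman_collection', 'utf8'));
var environmentJson = JSON.parse(fs.readFileSync('melbourne.postman_environment', 'utf8'));

Newman.execute(collectionJson, {
    envJson: environmentJson,
    dataFile: 'score-card-export-all-respondents-data.json',
    requestTimeout: 60000,
    asLibrary: true // return the exit code to the callback instead of exiting the process
}, function (exitCode) {
    console.log('newman finished with exit code ' + exitCode);
});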
Here is a script we use to test one of our servers. It is called with ./postman-runner.js --tests staging.json. The repository URL is postman-runner.
#!/usr/bin/env node
var Newman = require('newman');
// ResponseExporter is an internal newman module; we reach into it to
// collect the per-request test results after each run.
var ResponseExporter = require('newman/src/utilities/ResponseExporter');
var SparkPost = require('sparkpost');
var fs = require('fs');
var path = require('path');
var argv = require('minimist')(process.argv.slice(2));
var errors = [];
var testResultsTests = [];
/* Collects and summarises the results of a newman run. */
function EvaluateNewmanTestResults(options) {
    if (!options) {
        options = {};
    }
    this.created = new Date();
    if (options.name) {
        this.name = options.name;
    } else {
        this.name = this.created.toUTCString();
    }
    if (options.failed) {
        this.failed = options.failed;
    } else {
        this.failed = [];
    }
    if (options.resultsPath) {
        this.resultsPath = options.resultsPath;
    } else {
        this.resultsPath = null;
    }
    this.tests = [];
    this.urls = [];
    this.runCount = 0;
    this.failedCount = 0;
    this.summary = function () {
        return { name: this.name, urls: this.urls, runCount: this.runCount, failedCount: this.failedCount, failed: this.failed };
    };
    this.JSON = function () {
        return JSON.stringify(this.summary());
    };
    this.evaluateResults = function (results_) {
        this.tests = [];
        this.testResults = results_;
        /* the results file written by newman carries the collection name */
        this.overallResults = JSON.parse(fs.readFileSync(this.resultsPath, 'utf8'));
        if (this.overallResults.collection.name) {
            this.name = this.overallResults.collection.name;
        }
        for (var testResultsIndex = 0; testResultsIndex < this.testResults.length; testResultsIndex++) {
            var testResult = this.testResults[testResultsIndex];
            this.urls.push(testResult.url);
            /* each entry maps a test description to a pass/fail boolean */
            var tests = testResult.tests;
            for (var key in tests) {
                this.runCount++;
                if (!tests[key]) {
                    /* failed */
                    this.failed.push(key);
                    this.failedCount++;
                }
            }
        }
    };
}
/* Substitutes each {expression} placeholder in the subject and html with
   the value of the named local variable, then sends the notification
   email through SparkPost. Note the `arguments` parameter deliberately
   shadows the built-in arguments object. */
function notifyViaSparkPost(key, arguments, from, subject, html, notify, results, summary) {
    var holdArguments = arguments;
    arguments = JSON.stringify(arguments);
    /* the failed tests are swapped into the html */
    var failedTests = JSON.stringify(summary);
    /* swap {{ }} out for [[ ]] to avoid errors when populating the html */
    failedTests = failedTests.replace(/{{/g, '[[');
    failedTests = failedTests.replace(/}}/g, ']]');
    /* the collection name is swapped into the html */
    var collectionName = summary.name;
    var regularExpression = /(?:{)(.*?)(?:})/g;
    var match = regularExpression.exec(subject);
    while (match != null) {
        subject = subject.replace(match[0], eval(match[1]));
        match = regularExpression.exec(subject);
    }
    match = regularExpression.exec(html);
    while (match != null) {
        /* log each placeholder as it is substituted */
        console.log(match[0]);
        console.log(match[1]);
        html = html.replace(match[0], eval(match[1]));
        match = regularExpression.exec(html);
    }
    /* restore the {{ }} that were protected above */
    html = html.replace(/\[\[/g, '{{');
    html = html.replace(/\]\]/g, '}}');
    var sparkPost = new SparkPost(key);
    sparkPost.transmissions.send({
        transmissionBody: {
            content: {
                from: from,
                subject: subject,
                html: html
            },
            recipients: notify
        }
    }, function (err, res) {
        if (err) {
            console.log('Unexpected error sending email notification');
            console.log(err);
        } else {
            console.dir({ sent: 'email', 'with': holdArguments, 'and': failedTests });
        }
    });
}
/* Pops the next test off the queue; invokes the callback once the queue is empty. */
function nextTest(arguments, sparkpostApiKey, tests, failed, callback) {
    var test = tests.shift();
    if (!test) {
        callback(failed, arguments);
    } else {
        handleTest(arguments, sparkpostApiKey, test, tests, failed, callback);
    }
}
/* Runs a single test entry from the config file through newman and
   sends a SparkPost notification if any assertions failed. */
function handleTest(arguments, sparkpostApiKey, test, tests, failed, callback) {
    var description = test.description;
    var resultsJson = null;
    var ran = {};
    var newmanOptions = {};
    var holdArguments = arguments;
    if (description) {
        console.log('');
        console.log('Running ' + description);
    }
    /* paths in the config file are stored as arrays of segments */
    var collection = test.collection.join(path.sep);
    var environment = test.environment.join(path.sep);
    var dataFile = test.data;
    if (dataFile) {
        dataFile = dataFile.join(path.sep);
    }
    var results = test.results.join(path.sep);
    var requestTimeout = test.requestTimeout;
    var collectionJson = JSON.parse(fs.readFileSync(collection, 'utf8'));
    var environmentJson = JSON.parse(fs.readFileSync(environment, 'utf8'));
    newmanOptions = {
        envJson: environmentJson,
        iterationCount: 1, // define the number of times the runner should run
        outputFile: results, // the file to export to
        responseHandler: "TestResponseHandler", // the response handler to use
        asLibrary: true, // this makes sure the exit code is returned as an argument to the callback function
        stopOnError: false,
        dataFile: dataFile,
        requestTimeout: requestTimeout
    };
    arguments = { collection: collection, environment: environment, dataFile: dataFile, results: results };
    var notificationArguments = { collection: collection, environment: environment, dataFile: dataFile, results: results };
    var ouch = new EvaluateNewmanTestResults();
    ouch.resultsPath = results;
    ouch.runCount = 0;
    if (argv.simulateTestFailure) {
        /* send a fake failure notification without running newman */
        resultsJson = JSON.parse(fs.readFileSync(results, 'utf8'));
        ran = { name: collectionJson.name, runCount: 0, failedCount: 0, exitCode: 1, failed: ["Some example failed tests", "and another"] };
        notifyViaSparkPost(
            sparkpostApiKey,
            arguments,
            test.notify.sparkpost.from,
            test.notify.sparkpost.subject,
            test.notify.sparkpost.html,
            test.notify.sparkpost.recipients,
            resultsJson,
            ran
        );
        nextTest(holdArguments, sparkpostApiKey, tests, failed, callback);
    } else {
        /* clear the results from any previous run */
        ResponseExporter._results = [];
        Newman.execute(collectionJson, newmanOptions, function (exitCode) {
            ouch.evaluateResults(ResponseExporter._results);
            console.dir(ouch.summary());
            if (!holdArguments.totalTestsFailed) {
                holdArguments.totalTestsFailed = 0;
            }
            if (!holdArguments.totalTestsRun) {
                holdArguments.totalTestsRun = 0;
            }
            holdArguments.totalTestsFailed = holdArguments.totalTestsFailed + ouch.failedCount;
            holdArguments.totalTestsRun = holdArguments.totalTestsRun + ouch.runCount;
            /* only notify when something failed */
            if (ouch.failedCount > 0) {
                notifyViaSparkPost(
                    sparkpostApiKey,
                    notificationArguments,
                    test.notify.sparkpost.from,
                    test.notify.sparkpost.subject,
                    test.notify.sparkpost.html,
                    test.notify.sparkpost.recipients,
                    resultsJson,
                    ouch.summary());
            }
            nextTest(holdArguments, sparkpostApiKey, tests, failed, callback);
        });
    }
}
if (!argv.tests) {
    errors.push('--tests parameter is missing');
} else {
    if (!fs.existsSync(argv.tests)) {
        errors.push(argv.tests + ' is an invalid path');
    }
}
if (errors.length > 0) {
    console.dir({ errors: errors });
} else {
    fs.readFile(argv.tests, 'utf8', function (error, data) {
        var tests = JSON.parse(data);
        var sparkpostApiKey = tests.sparkpostApiKey;
        var run = tests.run; // array of tests to run
        var failed = [];
        argv.totalTestsRun = 0;
        argv.totalTestsFailed = 0;
        nextTest(argv, sparkpostApiKey, run, failed, function (failed, arguments) {
            console.log('finished test runs');
            if (failed.length > 0) {
                console.dir(failed);
            }
            console.dir({ totalTestsRun: arguments.totalTestsRun, totalTestsFailed: arguments.totalTestsFailed });
        });
    });
}
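For reference, the --tests file the script expects looks roughly like this. This is a hypothetical staging.json reconstructed from the fields the script reads (sparkpostApiKey, run, and the per-test description, collection, environment, data, results, requestTimeout, and notify.sparkpost fields); every value is a made-up placeholder:
{
    "sparkpostApiKey": "YOUR-SPARKPOST-API-KEY",
    "run": [
        {
            "description": "score card export, all respondents",
            "collection": ["collections", "score-card-export-all-respondents.postman_collection"],
            "environment": ["environments", "staging.postman_environment"],
            "data": ["data", "score-card-export-all-respondents-data.json"],
            "results": ["results", "score-card-export-all-respondents-results.json"],
            "requestTimeout": 60000,
            "notify": {
                "sparkpost": {
                    "from": "tests@example.com",
                    "subject": "{collectionName} failed",
                    "html": "<p>Failed tests: {failedTests}</p>",
                    "recipients": [{ "address": { "email": "team@example.com" } }]
                }
            }
        }
    ]
}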
Upvotes: 5
Reputation: 3748
I would not rely on Postman as the base for a testing framework.
Postman is really great for debugging and the like, but I doubt it is the best tool to hook into continuous integration builds or to provide comprehensive reports.
I would consider investigating Apache JMeter.
It's a bit more complicated, but it has dozens of uber-cool features and plugins, it easily integrates with whatever you want, and it does everything Postman does.
On my project we use Postman for quick checks while coding is being done, and JMeter for availability and load tests.
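For CI integration, JMeter's non-GUI mode is the usual entry point; a minimal invocation, where test-plan.jmx and results.jtl are placeholder file names:
jmeter -n -t test-plan.jmx -l results.jtl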
Upvotes: 4