Reputation: 89
API Docs Description:
Handle Newly Implemented Rate Limits:
There is a rate limit of 4 requests per second. If you exceed this limit, you will receive a response with status code 429 Too Many Requests.
You can have up to 8 open requests at any moment in time. This can happen when requests take longer than 1 second to respond while multiple requests are being made.
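In other words there are two separate limits: how many requests may be started per second (4) and how many may be open at once (8). Just to illustrate the two constraints (this is only a sketch, with a fake placeholder instead of the real API call), conceptually I need something like:
const pending = [];
let inFlight = 0;

// hypothetical stand-in for the real API call; it can take longer than 1 second
function fakeRequest(id) {
  return new Promise(resolve =>
    setTimeout(() => resolve(id), 500 + Math.random() * 2000));
}

const timer = setInterval(() => {
  let started = 0;
  // once per second, start up to 4 queued requests,
  // but only while fewer than 8 are still in flight
  while (started < 4 && inFlight < 8 && pending.length > 0) {
    const id = pending.shift();
    inFlight++;
    started++;
    fakeRequest(id).then(result => {
      inFlight--;
      console.log('finished', result, 'in flight:', inFlight);
    });
  }
  // stop once everything has been started and has finished
  if (pending.length === 0 && inFlight === 0) clearInterval(timer);
}, 1000);

for (let i = 1; i <= 10; i++) pending.push(i);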
I have tried using better-queue to queue my calls, but no matter which combination of options I try, I am not able to schedule 4 API calls per second while letting up to 8 calls stay open when previous calls haven't resolved yet. I couldn't get it to work as described above, but this is my testing code:
const Queue = require('better-queue');

function randomNumber(max, min) {
  return Math.floor(Math.random() * (max - min + 1)) + min;
}

let queueCount = 0;

const q = new Queue(function (input, cb) {
  console.log('@@@ task started -->', input);
  const result = input;
  // simulate an API call that takes 2-4 seconds to respond
  setTimeout(() => cb(null, result), randomNumber(4000, 2000));
}, {
  concurrent: 4,
  // filo: true,
  // batchSize: 4,
  // batchDelay: 1000,
  // batchDelayTimeout: 1000,
  // afterProcessDelay: 1000,
});
q.push({ id: ++queueCount })
q.push({ id: ++queueCount })
q.push({ id: ++queueCount })
q.push({ id: ++queueCount })
q.push({ id: ++queueCount })
q.push({ id: ++queueCount })
q.push({ id: ++queueCount })
q.push({ id: ++queueCount })
q.push({ id: ++queueCount })
q.push({ id: ++queueCount })
q.on('task_finish', function (taskId, result, stats) {
  console.log('@@@ task_finish <--', taskId, result, stats);
});
q.on('task_failed', function (taskId, err, stats) {
  console.log('### task_failed', taskId, err, stats);
});
q.on('empty', function () {
  console.log('@@@ queue empty');
});
q.on('drain', function () {
  console.log('@@@ queue drain');
});
which produced the results below:
@@@ task started --> { id: 1 }
@@@ task started --> { id: 2 }
@@@ task started --> { id: 3 }
@@@ task started --> { id: 4 }
@@@ task_finish <-- 2 { id: 2 } { elapsed: 2454 }
@@@ task started --> { id: 5 }
@@@ task_finish <-- 4 { id: 4 } { elapsed: 2483 }
@@@ task started --> { id: 6 }
@@@ task_finish <-- 1 { id: 1 } { elapsed: 2669 }
@@@ task started --> { id: 7 }
@@@ task_finish <-- 3 { id: 3 } { elapsed: 2945 }
@@@ task started --> { id: 8 }
@@@ task_finish <-- 5 { id: 5 } { elapsed: 4792 }
@@@ task started --> { id: 9 }
@@@ task_finish <-- 7 { id: 7 } { elapsed: 5119 }
@@@ task started --> { id: 10 }
@@@ task_finish <-- 6 { id: 6 } { elapsed: 5533 }
@@@ task_finish <-- 8 { id: 8 } { elapsed: 6414 }
@@@ task_finish <-- 9 { id: 9 } { elapsed: 7324 }
@@@ task_finish <-- 10 { id: 10 } { elapsed: 8876 }
@@@ queue empty
@@@ queue drain
It keeps concurrent requests at 4, but what I need is to send 4 requests per second while allowing up to 8 requests to be open at once. I'm looking for a solution; any help is appreciated.
Upvotes: 1
Views: 1703
Reputation: 89
So I did some research to find a good solution to the problem I asked above, and I came across this npm package: https://www.npmjs.com/package/qrate. It does exactly what I need: it keeps concurrent requests at 8 while rate-limiting to 4 requests per second. Here is my code:
// require qrate library
const qrate = require('qrate');

function randomNumber(max, min) {
  return Math.floor(Math.random() * (max - min + 1)) + min;
}

// mark the start time of this script
const start = new Date().getTime();

// worker function that calls back after 100ms - 10000ms
const worker = function (data, done) {
  // your worker code goes here
  // 'data' contains the queue item to work on
  // call 'done' when finished

  // output a message including a timestamp
  console.log('Processing', data, '@', new Date().getTime() - start, 'ms');

  // call the 'done' function after a random delay
  setTimeout(done, randomNumber(10000, 100));
};

// create a queue that processes items with our 'worker' function,
// with concurrency = 8 and rateLimit = 4 (at most 4 items started per second)
const q = qrate(worker, 8, 4);

// add things to the queue
for (let i = 1; i <= 20; i++) {
  q.push({ i: i });
}

q.drain = () => {
  // all of the queue items have been processed
  console.log('@@@ queue drain', '@', new Date().getTime() - start, 'ms');
};
which produces the results below; note that only 4 tasks start in each of the first two seconds (the rate limit), and task 9 does not start until an earlier task finishes (the concurrency cap of 8):
Processing { i: 1 } @ 54 ms
Processing { i: 2 } @ 56 ms
Processing { i: 3 } @ 56 ms
Processing { i: 4 } @ 56 ms
Processing { i: 5 } @ 1002 ms
Processing { i: 6 } @ 1003 ms
Processing { i: 7 } @ 1003 ms
Processing { i: 8 } @ 1003 ms
Processing { i: 9 } @ 3664 ms
Processing { i: 10 } @ 4308 ms
Processing { i: 11 } @ 4941 ms
Processing { i: 12 } @ 5570 ms
Processing { i: 13 } @ 6660 ms
Processing { i: 14 } @ 7254 ms
Processing { i: 15 } @ 7479 ms
Processing { i: 16 } @ 8261 ms
Processing { i: 17 } @ 9172 ms
Processing { i: 18 } @ 9975 ms
Processing { i: 19 } @ 10214 ms
Processing { i: 20 } @ 10406 ms
@@@ queue drain @ 17445 ms
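In a real script the setTimeout inside the worker would of course be replaced by the actual API call. As a rough sketch only (the endpoint URL below is a placeholder, and the built-in fetch assumes Node 18+; otherwise a library such as node-fetch would be needed), the worker could look something like:
const apiWorker = function (data, done) {
  fetch(`https://api.example.com/items/${data.i}`)
    .then(res => {
      if (res.status === 429) {
        // still hit the rate limit; report it as a task failure
        return done(new Error('429 Too Many Requests'));
      }
      return res.json().then(body => done(null, body));
    })
    .catch(done);
};

// const q = qrate(apiWorker, 8, 4)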
Cheers!
Upvotes: 4