HTTP 429 Too Many Requests indicates your Node.js application has exceeded the API rate limit. The server is asking your client to slow down request frequency.
The HTTP 429 status code is a client error response that signals rate limiting enforcement. When you see this error in Node.js, it means the remote server has detected too many requests from your application within a specified time window and is temporarily rejecting further requests. Rate limiting is a common protection mechanism used by APIs and web services to prevent abuse, ensure fair resource allocation, and maintain service stability. When you exceed the allowed request threshold, the server responds with 429 and typically includes headers indicating when you can retry. This error is different from authentication issues (401) or permission problems (403) - your credentials are valid, but you're simply making requests too quickly. The server expects you to implement proper throttling, backoff strategies, or queueing to stay within acceptable limits.
Examine the response headers to understand how long you need to wait before retrying:
const axios = require('axios');
axios.get('https://api.example.com/data')
.catch(error => {
if (error.response?.status === 429) {
const retryAfter = error.response.headers['retry-after'];
console.log('Rate limited. Retry after:', retryAfter, 'seconds');
// Also check other rate limit headers
const remaining = error.response.headers['x-ratelimit-remaining'];
const reset = error.response.headers['x-ratelimit-reset'];
console.log('Remaining requests:', remaining);
console.log('Limit resets at:', new Date(reset * 1000));
}
});The Retry-After header tells you exactly how many seconds to wait, or provides a timestamp when the limit resets.
Add intelligent retry handling that increases wait time with each subsequent failure:
async function fetchWithRetry(url, maxRetries = 3) {
let retries = 0;
let delay = 1000; // Start with 1 second
while (retries < maxRetries) {
try {
const response = await fetch(url);
if (response.status === 429) {
const retryAfter = response.headers.get('retry-after');
const waitTime = retryAfter
? parseInt(retryAfter) * 1000
: delay;
console.log(`Rate limited. Waiting ${waitTime}ms before retry ${retries + 1}/${maxRetries}`);
await new Promise(resolve => setTimeout(resolve, waitTime));
retries++;
delay *= 2; // Exponential backoff
continue;
}
if (!response.ok) {
throw new Error(`HTTP ${response.status}: ${response.statusText}`);
}
return await response.json();
} catch (error) {
if (retries === maxRetries - 1) throw error;
retries++;
await new Promise(resolve => setTimeout(resolve, delay));
delay *= 2;
}
}
throw new Error('Max retries exceeded');
}This approach respects the server's Retry-After header while falling back to exponential backoff if not provided.
Install and configure axios-retry to handle 429 errors automatically:
npm install axios axios-retryconst axios = require('axios');
const axiosRetry = require('axios-retry');
// Configure axios with retry logic
axiosRetry(axios, {
retries: 3,
retryDelay: (retryCount, error) => {
// Check for Retry-After header
const retryAfter = error.response?.headers['retry-after'];
if (retryAfter) {
return parseInt(retryAfter) * 1000;
}
// Otherwise use exponential backoff
return axiosRetry.exponentialDelay(retryCount);
},
retryCondition: (error) => {
// Retry on network errors or 429 status
return axiosRetry.isNetworkOrIdempotentRequestError(error)
|| error.response?.status === 429;
},
});
// Make requests normally - retries happen automatically
axios.get('https://api.example.com/data')
.then(response => console.log(response.data))
.catch(error => console.error('Request failed after retries:', error.message));This library handles the complexity of retry logic, exponential backoff, and Retry-After headers automatically.
Control request rate proactively to prevent hitting limits:
/**
 * Serializes async requests so they are dispatched no faster than a fixed
 * rate, preventing client-side rate-limit violations.
 */
class RequestQueue {
  constructor(requestsPerSecond = 10) {
    this.queue = [];                           // pending { requestFn, resolve, reject } entries
    this.processing = false;                   // guards against concurrent drains
    this.interval = 1000 / requestsPerSecond;  // minimum ms between dispatches
    this.lastRequestTime = 0;                  // timestamp of the most recent dispatch
  }

  /**
   * Enqueue a request factory; the returned promise settles with the
   * factory's outcome once its turn comes up.
   * @param {() => Promise<any>} requestFn
   * @returns {Promise<any>}
   */
  async add(requestFn) {
    return new Promise((resolve, reject) => {
      this.queue.push({ requestFn, resolve, reject });
      this.processQueue();
    });
  }

  /** Drain the queue, pausing between dispatches to respect the rate. */
  async processQueue() {
    if (this.processing || this.queue.length === 0) return;
    this.processing = true;
    while (this.queue.length > 0) {
      const sinceLast = Date.now() - this.lastRequestTime;
      const pauseMs = this.interval - sinceLast;
      if (pauseMs > 0) {
        await new Promise((wake) => setTimeout(wake, pauseMs));
      }
      const job = this.queue.shift();
      this.lastRequestTime = Date.now();
      try {
        job.resolve(await job.requestFn());
      } catch (error) {
        job.reject(error);
      }
    }
    this.processing = false;
  }
}
// Usage
const queue = new RequestQueue(5); // 5 requests per second
async function makeApiCall(id) {
return queue.add(() =>
fetch(`https://api.example.com/items/${id}`)
.then(res => res.json())
);
}
// Process 100 items with automatic throttling
const results = await Promise.all(
Array.from({ length: 100 }, (_, i) => makeApiCall(i))
);This queue ensures you never exceed the rate limit by controlling request timing.
Cache API responses to minimize unnecessary calls:
const NodeCache = require('node-cache');
const cache = new NodeCache({ stdTTL: 300 }); // 5 minute default TTL
async function cachedFetch(url, ttl = 300) {
const cached = cache.get(url);
if (cached) {
console.log('Returning cached response for:', url);
return cached;
}
try {
const response = await fetch(url);
if (!response.ok) {
throw new Error(`HTTP ${response.status}`);
}
const data = await response.json();
// Check for cache-control headers
const cacheControl = response.headers.get('cache-control');
if (cacheControl?.includes('max-age=')) {
const maxAge = parseInt(cacheControl.match(/max-age=(\d+)/)[1]);
cache.set(url, data, maxAge);
} else {
cache.set(url, data, ttl);
}
return data;
} catch (error) {
console.error('Fetch error:', error.message);
throw error;
}
}
// Usage
const data = await cachedFetch('https://api.example.com/data');Install node-cache: npm install node-cache
Caching prevents repeated requests for the same data, significantly reducing API calls.
Prevent cascading failures by temporarily stopping requests when rate limits are consistently hit:
/**
 * Simple circuit breaker for rate-limited APIs: after `threshold`
 * consecutive 429 failures it blocks further calls for `timeout` ms,
 * then lets a single probe through (HALF_OPEN) before closing again.
 */
class CircuitBreaker {
  constructor(threshold = 5, timeout = 60000) {
    this.failureCount = 0;          // consecutive 429s observed
    this.threshold = threshold;     // failures needed to trip the breaker
    this.timeout = timeout;         // ms to stay OPEN once tripped
    this.state = 'CLOSED';          // CLOSED, OPEN, HALF_OPEN
    this.nextAttempt = Date.now();  // earliest time a probe may run while OPEN
  }

  /**
   * Run `requestFn` through the breaker.
   * @param {() => Promise<any>} requestFn
   * @returns {Promise<any>} The request's result.
   * @throws {Error} Immediately when the breaker is OPEN and the cooldown
   *   has not elapsed; otherwise rethrows the request's own error.
   */
  async execute(requestFn) {
    if (this.state === 'OPEN' && Date.now() < this.nextAttempt) {
      throw new Error('Circuit breaker is OPEN - too many rate limit failures');
    }
    if (this.state === 'OPEN') {
      this.state = 'HALF_OPEN'; // cooldown elapsed: allow one probe through
    }
    try {
      const result = await requestFn();
      this.onSuccess();
      return result;
    } catch (error) {
      this.onFailure(error);
      throw error;
    }
  }

  /** Any success fully closes the breaker and clears the failure count. */
  onSuccess() {
    this.failureCount = 0;
    this.state = 'CLOSED';
  }

  /** Count only 429 failures; trip the breaker at the threshold. */
  onFailure(error) {
    if (error.response?.status !== 429) return;
    this.failureCount++;
    if (this.failureCount >= this.threshold) {
      this.state = 'OPEN';
      this.nextAttempt = Date.now() + this.timeout;
      console.log(`Circuit breaker OPEN. Waiting ${this.timeout}ms`);
    }
  }
}
// Usage
const breaker = new CircuitBreaker(3, 30000); // Open after 3 failures, wait 30s
async function makeRequest(id) {
return breaker.execute(() =>
axios.get(`https://api.example.com/items/${id}`)
);
}This prevents your application from continuing to hammer an API that's already rate limiting you.
Rate Limit Header Standards
Different APIs use varying header names for rate limit information:
- Standard: Retry-After, RateLimit-Limit, RateLimit-Remaining, RateLimit-Reset
- GitHub: X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset
- Twitter: x-rate-limit-limit, x-rate-limit-remaining, x-rate-limit-reset
Always check the specific API's documentation for their header conventions.
Token Bucket vs Fixed Window
APIs may implement rate limiting using different algorithms:
- Fixed Window: Allows N requests per time window (e.g., 100 requests per minute). Counter resets at window boundaries.
- Token Bucket: Refills tokens at a steady rate, allowing burst traffic up to bucket capacity while maintaining average rate.
- Sliding Window: Hybrid approach that prevents boundary exploitation by considering requests over a rolling time period.
Understanding the algorithm helps you optimize request patterns.
Distributed Systems Considerations
In distributed Node.js applications (multiple servers, containers, or workers), coordinate rate limiting across instances:
- Use Redis to share rate limit counters between processes
- Implement distributed locks to prevent race conditions
- Consider using a dedicated API gateway (Kong, Tyk) for centralized rate limiting
- Track per-user/per-tenant limits separately to avoid shared quota exhaustion
429 vs 503 Status Codes
Both can indicate overload, but with different meanings:
- 429 Too Many Requests: Client-specific rate limit exceeded. You're making too many requests.
- 503 Service Unavailable: Server-wide capacity issue. The entire service is overloaded, not just your client.
Both may include Retry-After headers, but 503 suggests infrastructure problems beyond just rate limiting.
Production Monitoring
Set up alerts for 429 errors to detect rate limit issues before they impact users:
- Track 429 error rates in APM tools (New Relic, DataDog, Prometheus)
- Monitor retry queue depths and circuit breaker state changes
- Log rate limit headers to identify approaching limits
- Set up proactive alerts when X-RateLimit-Remaining drops below thresholds
OAuth and Authentication
Some APIs provide higher rate limits for authenticated requests. If you're hitting limits:
- Verify your authentication is working (check for API keys, OAuth tokens)
- Consider upgrading to a paid tier for increased quotas
- Use service accounts or app-specific credentials instead of user tokens
- Implement OAuth token refresh to avoid limits from expired credentials
Error: Listener already called (once event already fired)
EventEmitter listener already called with once()
Error: EACCES: permission denied, open '/root/file.txt'
EACCES: permission denied
Error: Invalid encoding specified (stream encoding not supported)
How to fix Invalid encoding error in Node.js readable streams
Error: EINVAL: invalid argument, open
EINVAL: invalid argument, open
TypeError: readableLength must be a positive integer (stream config)
TypeError: readableLength must be a positive integer in Node.js streams