NodeJS https request needs excessive memory
I am trying to load the JSON from this URL: https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/index.json. The file is over 600 MB.
I am using the code below in a Lambda function.
The Lambda function is set up with a timeout of 10 minutes and 3008 MB of memory, but the function tries to use more than that.
const https = require("https");

let res = await doRequest2("https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/index.json");

function doRequest2(url) {
  console.log("doRequest");
  return new Promise(function (resolve, reject) {
    https.get(url, res => {
      res.setEncoding("utf8");
      // Buffer the entire response body in memory as a single string.
      let body = "";
      res.on("data", chunk => {
        body += chunk;
      });
      res.on("end", () => {
        // JSON.parse can throw on malformed input, so reject rather than crash.
        try {
          resolve(JSON.parse(body));
        } catch (err) {
          reject(err);
        }
      });
      res.on("error", err => {
        console.log("Error", err);
        reject(err);
      });
    });
  });
}
How can downloading this JSON need so much memory?
node.js amazon-web-services aws-lambda
asked Jan 2 at 7:59 – Alex
Instead of buffering the whole download, you can stream the data straight to its destination, e.g. res.pipe(destinationStream), depending on your requirements.
– BittuS
Jan 2 at 8:36
@BittuS I would like to save the file to AWS S3, do you know if I can stream it there?
– Alex
Jan 2 at 12:11
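Following up on the comment thread, here is a minimal sketch of streaming the response straight to S3 instead of buffering it, assuming the aws-sdk v2 client that ships with the Lambda Node.js runtime. The bucket and key names are placeholders, not part of the original question.

// Stream an HTTPS download directly into S3 without holding it in memory.
const https = require("https");
const AWS = require("aws-sdk");
const s3 = new AWS.S3();

function streamToS3(url, bucket, key) {
  return new Promise((resolve, reject) => {
    https.get(url, res => {
      if (res.statusCode !== 200) {
        reject(new Error("Unexpected status " + res.statusCode));
        return;
      }
      // s3.upload() accepts a readable stream as Body and performs a
      // managed multipart upload, so the file never has to fit in memory.
      s3.upload({ Bucket: bucket, Key: key, Body: res }, (err, data) => {
        if (err) reject(err);
        else resolve(data);
      });
    }).on("error", reject);
  });
}

// Usage, with a hypothetical bucket name:
// await streamToS3(
//   "https://pricing.us-east-1.amazonaws.com/offers/v1.0/aws/AmazonEC2/current/index.json",
//   "my-pricing-bucket",
//   "AmazonEC2/index.json"
// );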
1 Answer
The memory is being used for much more than just holding one copy of the JSON file. The body is built up by string concatenation, which produces intermediate copies as it grows; JavaScript strings can take up to two bytes per character; and JSON.parse then needs the complete string and the resulting object graph in memory at the same time. For a 600 MB document, that combination can easily exceed the 3008 MB limit.
The download is quite a bit of heavy lifting, and because of the size of the file the memory usage can be spiky.
I actually managed to get your code working, but only about one time in four: as soon as the memory usage hits the limit, the Lambda is killed.
This job seems to be a little too big for what Lambda can handle, reliably at least, while the whole file is buffered in memory.
NB: I removed a previous answer pointing to the disk space limit in the temp folder, as after my own testing I could see this was not the case.
answered Jan 2 at 15:03 – K Mo
Thank you, I am afraid you are right.
– Alex
Jan 2 at 19:06
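To see where the memory goes, here is a minimal sketch that logs heap usage before and after parsing, using Node's built-in process.memoryUsage(). The local file path is a placeholder; running it against a smaller copy of the file avoids hitting the same limit.

// Measure heap usage around buffering and parsing a large JSON file.
const fs = require("fs");

function mb(bytes) {
  return (bytes / 1024 / 1024).toFixed(1) + " MB";
}

// Placeholder path: a local (ideally smaller) copy of the pricing file.
const text = fs.readFileSync("index.json", "utf8");
console.log("string buffered, heap:", mb(process.memoryUsage().heapUsed));

const parsed = JSON.parse(text);
console.log("parsed, heap:", mb(process.memoryUsage().heapUsed));
console.log("top-level keys:", Object.keys(parsed).length);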