Fastest Way to Copy a File in Node.js

Fastest way to copy a file in Node.js

Use the standard built-in fs.copyFile function:

const fs = require('fs');

// File destination.txt will be created or overwritten by default.
fs.copyFile('source.txt', 'destination.txt', (err) => {
  if (err) throw err;
  console.log('source.txt was copied to destination.txt');
});
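If you prefer promises over callbacks, the same operation is available through fs.promises; here is a minimal sketch (the file names are just examples):

const fsp = require('fs').promises;

async function copy() {
  // destination.txt is created or overwritten, same as the callback form.
  await fsp.copyFile('source.txt', 'destination.txt');
  console.log('source.txt was copied to destination.txt');
}

copy().catch(console.error);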

If you have to support old, end-of-life versions of Node.js that do not have fs.copyFile, you can pipe a read stream into a write stream:

const fs = require('fs');
fs.createReadStream('test.log').pipe(fs.createWriteStream('newLog.log'));
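Note that this one-liner gives you no completion or error notification. If you need that, a minimal sketch (same file names as above) looks like this:

const fs = require('fs');

const readStream = fs.createReadStream('test.log');
const writeStream = fs.createWriteStream('newLog.log');

readStream.on('error', err => console.log(err));
writeStream.on('error', err => console.log(err));
writeStream.on('finish', () => console.log('copy finished'));

readStream.pipe(writeStream);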

How to copy multiple files using fs.copyFile in Node?

You can simply create a function of your own that takes the source and destination directories and an array of filenames as arguments:

const util = require('util');
const fs = require('fs');
const path = require('path');
const copyFilePromise = util.promisify(fs.copyFile);

function copyFiles(srcDir, destDir, files) {
  return Promise.all(files.map(f => {
    return copyFilePromise(path.join(srcDir, f), path.join(destDir, f));
  }));
}

// usage
copyFiles('src', 'build', ['unk.txt', 'blah.txt']).then(() => {
  console.log("done");
}).catch(err => {
  console.log(err);
});
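On Node versions that ship fs.promises you can skip util.promisify entirely; a sketch of the same helper:

const fsp = require('fs').promises;
const path = require('path');

function copyFiles(srcDir, destDir, files) {
  // Copy each file from srcDir to destDir in parallel.
  return Promise.all(
    files.map(f => fsp.copyFile(path.join(srcDir, f), path.join(destDir, f)))
  );
}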

Node.js script to copy files overwriting previous ones?

It will work if you remove this line:

execute()

Copy a source file to another destination in Node.js

Try:

const path = require('path');
const fs = require('fs-extra');

fs.copySync(path.resolve(__dirname, './mainisp.jpg'), './test/mainisp.jpg');

As you can see in the error message, you're trying to read the file from E:\mainisp.jpg instead of the current directory.

You also need to specify the target path with the file, not only the destination folder.
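fs-extra (a third-party module) also offers an asynchronous fs.copy that returns a promise; a small sketch with the same paths:

const path = require('path');
const fs = require('fs-extra');

fs.copy(path.resolve(__dirname, './mainisp.jpg'), './test/mainisp.jpg')
  .then(() => console.log('copied'))
  .catch(err => console.log(err));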

Copy folder recursively in Node.js

Since Node v16.7.0 it is possible to use the fs.cp or fs.cpSync functions.

fs.cp(src, dest, { recursive: true }, (err) => { if (err) throw err; });

Current stability (in Node v18.7.0) is Experimental.
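A promise-based form is also available via fs/promises; a sketch, assuming Node v16.7.0+ (src and dest are placeholder paths):

const fsp = require('fs/promises');

async function copyDir(src, dest) {
  // Recursively copy the whole directory tree from src to dest.
  await fsp.cp(src, dest, { recursive: true });
}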

Fetching Large File For Processing Using Node.js

You have no flow control on the file.write(chunk). You need to pay attention to the return value from file.write(chunk) and when it returns false, you have to wait for the drain event before writing more. Otherwise, you can overflow the buffer on the writestream, particularly when writing large things to a slow medium like disk.

Without that flow control, when you try to write large amounts of data faster than the disk can keep up, memory usage will balloon because the write stream has to accumulate far more data in its buffer than is desirable.

Since your data is coming from a readable, when you get false back from the file.write(chunk), you will also have to pause the incoming read stream so it doesn't keep spewing data events at you while you're waiting for the drain event on the writestream. When you get the drain event, you can then resume the readstream.

FYI, if you don't need the progress info, you can let pipeline() do all the work (including the flow control) for you; you don't have to write that code yourself. You may even still be able to gather progress info by watching the write stream's activity when using pipeline().
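For reference, here is a minimal sketch of the pipeline() approach; it assumes the same url and fileName variables used in the examples below, and pipeline() handles the back-pressure for you:

const fs = require('fs');
const https = require('https');
const { pipeline } = require('stream');

// url and fileName are assumed to be defined as in the examples below.
https.get(url, (res) => {
  pipeline(res, fs.createWriteStream(fileName), (err) => {
    if (err) {
      console.log(err);
    } else {
      console.log(`${fileName} downloaded successfully.`);
    }
  });
});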

Here's one way to implement the flow control yourself, though I'd recommend you use the pipeline() function in the stream module and let it do all this for you if you can:

const file = fs.createWriteStream(fileName);
file.on("error", err => console.log(err));
http.get(url).on("response", function(res) {
  let downloaded = 0;
  res.on("data", function(chunk) {
    let readyForMore = file.write(chunk);
    if (!readyForMore) {
      // pause readstream until drain event comes
      res.pause();
      file.once('drain', () => {
        res.resume();
      });
    }
    downloaded += chunk.length;
    process.stdout.write(`Downloaded ${(downloaded / 1000000).toFixed(2)} MB of ${fileName}\r`);
  }).on("end", function() {
    file.end();
    console.log(`${fileName} downloaded successfully.`);
  }).on("error", err => console.log(err));
});

There also appeared to be a timeout issue in the http request. When I added this:

// set client timeout to 24 hours
res.setTimeout(24 * 60 * 60 * 1000);

I was then able to download the whole 7GB ZIP file.

Here's turnkey code that worked for me:

const fs = require('fs');
const https = require('https');
const url =
  "https://www2.census.gov/programs-surveys/acs/summary_file/2020/data/5_year_entire_sf/All_Geographies_Not_Tracts_Block_Groups.zip";
const fileName = "census-data2.zip";

const file = fs.createWriteStream(fileName);
file.on("error", err => {
  console.log(err);
});
const options = {
  headers: {
    "accept-encoding": "gzip, deflate, br",
  }
};
https.get(url, options).on("response", function(res) {
  const startTime = Date.now();

  function elapsed() {
    const delta = Date.now() - startTime;
    // convert to minutes
    const mins = (delta / (1000 * 60));
    return mins;
  }

  let downloaded = 0;
  console.log(res.headers);
  const contentLength = +res.headers["content-length"];
  console.log(`Expecting download length of ${(contentLength / (1024 * 1024)).toFixed(2)} MB`);
  // set timeout to 24 hours
  res.setTimeout(24 * 60 * 60 * 1000);
  res.on("data", function(chunk) {
    let readyForMore = file.write(chunk);
    if (!readyForMore) {
      // pause readstream until drain event comes
      res.pause();
      file.once('drain', () => {
        res.resume();
      });
    }
    downloaded += chunk.length;
    const downloadPortion = downloaded / contentLength;
    const percent = downloadPortion * 100;
    const elapsedMins = elapsed();
    const totalEstimateMins = (1 / downloadPortion) * elapsedMins;
    const remainingMins = totalEstimateMins - elapsedMins;

    process.stdout.write(
      ` ${elapsedMins.toFixed(2)} mins, ${percent.toFixed(1)}% complete, ${Math.ceil(remainingMins)} mins remaining, downloaded ${(downloaded / (1024 * 1024)).toFixed(2)} MB of ${fileName} \r`
    );
  }).on("end", function() {
    file.end();
    console.log(`${fileName} downloaded successfully.`);
  }).on("error", err => {
    console.log(err);
  }).on("timeout", () => {
    console.log("got timeout event");
  });
});

