Objective
A crawler is a program that automatically accesses web page content. Crawlers are an important part of search engines, so search engine optimization is, to a large extent, optimization aimed at crawlers.
This article introduces a small blog crawler written with Node.js. I have annotated the core parts so you can follow along; to adapt it, just change the URL and modify `filterChapters` and `filterChapters1` to match the DOM structure of the blog you want to crawl.
Without further ado, let's look at the example code.
var http=require (' http ');
var promise=require (' Bluebird ');
var cheerio = require (' Cheerio ');
var url= ' http://www.immaster.cn '//Blog address function filterchapters1 (HTML) {//Parse article link var $ =cheerio.load (HTML);
var post=$ ('. Post ');
var content=[];
Post.each (function (item) {var postid=$ (this). Find ('. Tit '). Find (' a '). attr (' href ');
Content.push (PostID);
}) return content;
function Filterchapters (HTML) {//parsing content in each article var $ =cheerio.load (HTML);
var tit=$ ('. Post. Tit '). Find (' a '). Text ();
var postid=$ ('. Tit '). Find (' a '). attr (' href ');
var commentnum=$ ('. Comments-title '). Text ();
Commentnum=commentnum.trim ();
Commentnum=commentnum.replace (' \ n ', ');
var content={tit:tit,url:postid,commentnum:commentnum};
return content; function GetID (URL) {//Crawl Home article link return new Promise (function (resolve,reject) {http.get (url,function (res) {var html
= '';
Res.on (' Data ', function (data) {html+=data;
});
Res.on (' End ', function () {var content=filterchapters1 (HTML) resolve (content); })
). On (' Error ', function () {reject (E); Console.log (' Crawl Error! ')}})} function GetPageAsync (URL) {//Crawl single page content return new Promise (function (resolve,reject) {console.log (' crawling ... ') +u
RL) Http.get (Url,function (res) {var html = ';
Res.on (' Data ', function (data) {html+=data;
});
Res.on (' End ', function () {resolve (HTML);
}). On (' Error ', function () {reject (E); Console.log (' Crawl Error!
')})} getid (URL). Then (function (PostID) {return new Promise (function (resolve,reject) {var pageurls=[];
Postid.foreach (function (ID) {Pageurls.push (GetPageAsync (id));
}) Resolve (Pageurls); }). Then (function (pageurls) {return new Promise.all (pageurls)//Let Promise object run at the same time}). Then (function (pages) {var
Coursesdata=[];
Pages.foreach (function (HTML) {var courses=filterchapters (HTML);
Coursesdata.push (courses); }) Coursesdata.foreach (function (v) {console.log (' title: ' +v.tit+ ') \ n address: "+v.url+" (Comments: "+v.commentnum)}")})
Summary
That is the entire content of this article. I hope it helps everyone learning to build a crawler with Node.js. If you have questions, feel free to leave a comment.