Example code for a small blog crawler in Node.js


Objective

A crawler is a program that automatically fetches web page content. It is an important part of a search engine, so search engine optimization is, to a large extent, optimization aimed at crawlers.

This article introduces a small blog crawler written with Node.js. The core parts are annotated; once you understand them, you only need to change the URL and adjust filterChapters and filterChapters1 to match the DOM structure of the blog you want to crawl (a sketch of such an adaptation follows the example code).

Without further ado, here is the example code.

var http = require('http');
var Promise = require('bluebird');
var cheerio = require('cheerio');
var url = 'http://www.immaster.cn'; // blog address

function filterChapters1(html) { // parse the article links on the home page
  var $ = cheerio.load(html);
  var post = $('.post');
  var content = [];
  post.each(function (item) {
    var postId = $(this).find('.tit').find('a').attr('href');
    content.push(postId);
  });
  return content;
}

function filterChapters(html) { // parse the content of a single article
  var $ = cheerio.load(html);
  var tit = $('.post .tit').find('a').text();
  var postId = $('.tit').find('a').attr('href');
  var commentnum = $('.comments-title').text().trim().replace('\n', '');
  return { tit: tit, url: postId, commentnum: commentnum };
}

function getId(url) { // crawl the home page and collect the article links
  return new Promise(function (resolve, reject) {
    http.get(url, function (res) {
      var html = '';
      res.on('data', function (data) { html += data; });
      res.on('end', function () {
        resolve(filterChapters1(html));
      });
    }).on('error', function (e) {
      console.log('Crawl error!');
      reject(e);
    });
  });
}

function getPageAsync(url) { // crawl the content of a single page
  return new Promise(function (resolve, reject) {
    console.log('crawling ... ' + url);
    http.get(url, function (res) {
      var html = '';
      res.on('data', function (data) { html += data; });
      res.on('end', function () { resolve(html); });
    }).on('error', function (e) {
      console.log('Crawl error!');
      reject(e);
    });
  });
}

getId(url).then(function (postIds) {
  var pageUrls = [];
  postIds.forEach(function (id) {
    pageUrls.push(getPageAsync(id));
  });
  return Promise.all(pageUrls); // run all the page requests at the same time
}).then(function (pages) {
  var coursesData = [];
  pages.forEach(function (html) {
    coursesData.push(filterChapters(html));
  });
  coursesData.forEach(function (v) {
    console.log('Title: ' + v.tit + '\nAddress: ' + v.url + '\nComments: ' + v.commentnum);
  });
});
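As noted above, adapting the crawler to another blog means pointing url at the new site and rewriting the two filter functions to match its DOM. Below is a minimal sketch of what that might look like for a hypothetical blog whose posts are marked up as article.entry elements with an h2.entry-title link and a .comment-count element; those selectors are assumptions for illustration only, not part of the original code.

// A minimal adaptation sketch, assuming a hypothetical markup of
// <article class="entry"> blocks whose title link is <h2 class="entry-title"><a href="...">
// and whose comment count sits in an element with class "comment-count".
// Replace these selectors with whatever the target blog actually uses.
var cheerio = require('cheerio');

function filterChapters1(html) { // collect article links from the list page
  var $ = cheerio.load(html);
  var links = [];
  $('article.entry').each(function () {
    links.push($(this).find('h2.entry-title a').attr('href'));
  });
  return links;
}

function filterChapters(html) { // extract title, URL and comment count from one article
  var $ = cheerio.load(html);
  var titleLink = $('article.entry h2.entry-title a').first();
  return {
    tit: titleLink.text(),
    url: titleLink.attr('href'),
    commentnum: $('.comment-count').text().trim()
  };
}

The rest of the script (getId, getPageAsync, and the Promise chain) stays the same. Keep in mind that Promise.all rejects as soon as any single page request fails, so one bad link will abort the whole run.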

Summary

That is the entire content of this article. I hope it helps anyone learning to build, or already building, a crawler with Node.js. If you have questions, feel free to leave a comment.
