===================== crawler principle =====================
Access the news homepage with Python and extract the news-leaderboard links using regular expressions.
Visit these links in turn, parse the article information out of each page's HTML source, and store it in an Article object.
The data held by the Article object is then saved to the database through the PyMySQL third-party module.
===================== data Structure =====================
CREATE TABLE `news` (
    `id` int(6) unsigned AUTO_INCREMENT NOT NULL,
    `title` varchar(50) NOT NULL,
    `author` varchar(20) NOT NULL,
    `date` varchar(20) NOT NULL,
    `about` varchar(255) NOT NULL,
    `content` text NOT NULL,
    PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
-- NOTE: the varchar widths were unreadable in the source copy; 50/20/20 are plausible values — confirm against the original schema.
===================== script code =====================
# Baidu Baijia (baijia.baidu.com) article collector.
#
# Scrapes the daily hot-news links from the homepage, extracts each
# article's metadata and body with regular expressions, and stores the
# result in a MySQL `news` table via PyMySQL.
#
# NOTE(review): this script was reconstructed from a badly garbled copy;
# the regex patterns and config values should be re-verified against the
# live page markup and the real database before use.
import re
import urllib.request

import pymysql.cursors

# Database connection parameters.
config = {
    'host': 'localhost',
    'port': '3310',
    'username': 'woider',
    'password': '3243',
    'database': 'python',
    'charset': 'utf8',
}

# Data-table creation statement (run once; kept here for reference only).
'''
CREATE TABLE `news` (
    `id` int(6) unsigned AUTO_INCREMENT NOT NULL,
    `title` varchar(50) NOT NULL,
    `author` varchar(20) NOT NULL,
    `date` varchar(20) NOT NULL,
    `about` varchar(255) NOT NULL,
    `content` text NOT NULL,
    PRIMARY KEY (`id`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
'''


class Article(object):
    """Plain data holder for one scraped article."""
    title = None    # article headline
    author = None   # author display name
    date = None     # publication date exactly as shown on the page
    about = None    # one-line summary / lead-in
    content = None  # plain-text article body (HTML stripped)


# Regular expressions tuned to the baijia.baidu.com page layout.
# NOTE(review): tag/class names below were partially unreadable in the
# source copy — confirm them against the actual HTML.
pat_article = r'<p\s*class="title"><a\s*href="(.+?)"'                   # article links
pat_title = r'<div\s*id="page">\s*<h1>(.+)</h1>'                        # article title
pat_author = r'<div\s*class="article-info">\s*<a.+?>(.+)</a>'           # article author
pat_date = r'<span\s*class="time">(.+)</span>'                          # release date
pat_about = r'<blockquote><i\s*class="i\squote"></i>(.+)</blockquote>'  # article summary
pat_content = r'<div\s*class="article-detail">((.|\s)+)'                # article body
pat_copy = r'<div\s*class="copyright">((.|\s)+)'                        # copyright notice
pat_tag = r'(<script(.|\s)*?</script>)|(<.*?>\s*)'                      # HTML tags


def collect_article(url):
    """Download the page at *url* and return a populated Article.

    Raises IndexError if any pattern fails to match (i.e. the page
    layout changed) and whatever urllib raises on network errors.
    """
    article = Article()
    html = urllib.request.urlopen(url).read().decode('utf8')
    article.title = re.findall(pat_title, html)[0]
    article.author = re.findall(pat_author, html)[0]
    article.date = re.findall(pat_date, html)[0]
    article.about = re.findall(pat_about, html)[0]
    # pat_content has two groups, so findall yields a tuple; group 0 is
    # the full body match.
    content = re.findall(pat_content, html)[0]
    content = re.sub(pat_copy, '', content[0])   # drop the copyright footer
    content = re.sub('</p>', '\n', content)      # keep paragraph breaks
    content = re.sub(pat_tag, '', content)       # strip remaining HTML tags
    article.content = content
    return article


def save_article(connect, article):
    """Insert *article* into the `news` table.

    Returns the article title on success or the error text on failure —
    errors are reported to the caller, never raised, so one bad article
    does not abort the whole crawl.
    """
    message = None
    # Create the cursor before the try block so the finally clause can
    # never hit an unbound name (the original closed an undefined cursor
    # when connect.cursor() itself failed).
    cursor = connect.cursor()
    try:
        sql = ("INSERT INTO news (title, author, date, about, content) "
               "VALUES (%s, %s, %s, %s, %s)")
        data = (article.title, article.author, article.date,
                article.about, article.content)
        cursor.execute(sql, data)  # parameterized — no SQL injection
        connect.commit()
    except Exception as e:
        message = str(e)
    else:
        message = article.title
    finally:
        cursor.close()
    return message


def main():
    """Crawl the homepage leaderboard and store the top articles."""
    home = 'http://baijia.baidu.com/'  # Baidu Baijia homepage
    html = urllib.request.urlopen(home).read().decode('utf8')
    links = re.findall(pat_article, html)[0:10]  # daily hot news, top 10

    connect = pymysql.connect(host=config['host'],
                              port=int(config['port']),
                              user=config['username'],
                              passwd=config['password'],
                              db=config['database'],
                              charset=config['charset'])
    try:
        for url in links:
            article = collect_article(url)           # collect article info
            message = save_article(connect, article)  # save article info
            print(message)
    finally:
        connect.close()  # always release the database connection


if __name__ == '__main__':
    main()
===================== Running Results =====================
Python Web crawler (News collection script)