[Python crawler] 13: Selenium + PhantomJS crawl Activity Tree meeting activity data

Source: Internet
Author: User

Crawl meeting activity data from the Activity Tree site (http://www.huodongshu.com/html/index.html).

The overall idea is similar to the earlier [Python crawler] post on the Huodongxing ("activity line") site, which also crawled with multiple threads. On the Activity Tree site, however, the URL of a keyword's search results page is fixed: searching for "digital", for example, returns 470 results at 10 records per page, yet the URL of the second results page is identical to the URL of the first, so page-numbered URLs cannot be requested directly.

Therefore a single thread is used to search each keyword, and pagination is driven by clicking the site's "next page" link, as the sketch below shows.
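A minimal sketch of that click-through pagination pattern, assuming the same XPath expressions as in the configuration file shown further down (the URL-encoded keyword here, %E6%95%B0%E5%AD%97 for "数字"/"digital", is just an example):

# coding=utf-8
# Minimal pagination sketch: the results URL is the same for every page,
# so pages are turned by clicking the "next page" link instead.
import time
from selenium import webdriver

driver = webdriver.PhantomJS()
driver.get('http://www.huodongshu.com/html/find_search.html?search_keyword=%E6%95%B0%E5%AD%97')
time.sleep(5)

while True:
    # records of the current results page
    records = driver.find_elements_by_xpath("//div[@id='eventList']/div[@class='list']")
    # ... process records here ...
    next_links = driver.find_elements_by_xpath("//dt[@class='next']/a")
    if len(next_links) == 0:
        break  # no "next page" link, so this was the last page
    next_links[0].click()
    time.sleep(3)  # give the list time to refresh in place

driver.quit()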

 

The specific code is as follows:

    

# coding=utf-8
import os
import re
import time
import urllib
from threading import Thread

from selenium import webdriver
import selenium.webdriver.support.ui as ui
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.select import Select

import IniFile
import LogFile
import mongoDbBase
# thread class that scrapes the data
class scrapyDataThread(Thread):

    def __init__(self, webSearchUrl, pageCountLable, htmlLable, originalUrlLabel, nextUrlLabel, keyword, db):
        '''
        Constructor.
        :param webSearchUrl: URL of the search page
        :param pageCountLable: XPath of the element holding the number of search results
        :param htmlLable: XPath of the records to scrape
        :param originalUrlLabel: XPath of the original URL of each record
        :param nextUrlLabel: XPath of the "next page" link
        :param keyword: keyword to search for (multiple keywords are separated by semicolons (;))
        :param db: database engine used for saving
        '''
        Thread.__init__(self)

        self.webSearchUrl = webSearchUrl
        self.pageCountLable = pageCountLable
        self.htmlLable = htmlLable
        self.originalUrlLabel = originalUrlLabel
        self.nextUrlLabel = nextUrlLabel
        self.keyword = keyword
        self.db = db

        # IEDriverServer = self.cf.GetValue("section", "IEDriverServer")
        # os.environ["webdriver.ie.driver"] = IEDriverServer
        # self.urldriver = webdriver.Ie(IEDriverServer)

        self.driver = webdriver.PhantomJS()
        self.wait = ui.WebDriverWait(self.driver, 20)
        self.driver.maximize_window()

    def compareDate(self, dateLeft, dateRight):
        '''
        Compare two dates.
        :param dateLeft: date in the format 2017-03-04
        :param dateRight: date in the format 2017-03-04
        :return: 1 if left is greater than right, 0 if equal, -1 if left is less than right
        '''
        dls = dateLeft.split('-')
        drs = dateRight.split('-')
        if len(dls) > len(drs):
            return 1
        if int(dls[0]) == int(drs[0]) and int(dls[1]) == int(drs[1]) and int(dls[2]) == int(drs[2]):
            return 0

        if int(dls[0]) > int(drs[0]):
            return 1
        elif int(dls[0]) == int(drs[0]) and int(dls[1]) > int(drs[1]):
            return 1
        elif int(dls[0]) == int(drs[0]) and int(dls[1]) == int(drs[1]) and int(dls[2]) > int(drs[2]):
            return 1
        return -1

    def date_isValid(self, strDateText):
        '''
        Check whether a datetime string is still valid: the given time is later than
        the current time, or the current time falls within the given range.
        :param strDateText: three formats: '2017.04.27 ~ 04.28'; '2017.04.20 08:30 ~ 12:30'; '2015.12.29 ~ 2016.01.03'
        :return: True if valid, False if not
        '''
        datePattern = re.compile(r'\d{4}-\d{2}-\d{2}')
        date = strDateText.replace('.', '-')
        strDate = re.findall(datePattern, date)
        currentDate = time.strftime('%Y-%m-%d')

        if len(strDate) == 2:
            if self.compareDate(strDate[1], currentDate) > 0:
                return True
        elif len(strDate) == 1:
            if self.compareDate(strDate[0], currentDate) >= 0:
                return True
        else:
            # e.g. 2015-06-04 13:30 ~ 17:30
            datePattern = re.compile(r'\d{4}-\d{2}-\d{2}\s~\s\d{2}-\d{2}')
            strDate = re.findall(datePattern, date)
            if len(strDate) > 0:
                if self.compareDate(strDate[0][0:5] + strDate[0][13:], currentDate) >= 0:
                    return True
                else:
                    return False
        return False

    def run(self):
        print ''
        print 'keyword: %s' % self.keyword
        self.driver.get(self.webSearchUrl)
        time.sleep(5)
        # number of records
        pageCount_elements = self.driver.find_elements_by_xpath(self.pageCountLable)
        if len(pageCount_elements) > 0:
            strCount = pageCount_elements[0].text.encode('utf8')
            pageCount = int(strCount) / 10
            if int(strCount) % 10 > 0:
                pageCount = pageCount + 1

            page_Count = pageCount
            pageIndex = 0
            kword = self.keyword
            recordCount = 0
            while pageCount > 0:
                pageCount = pageCount - 1
                if pageIndex > 0:
                    # the results URL never changes, so click the "next page" link
                    next_element = self.driver.find_elements_by_xpath(self.nextUrlLabel)
                    if len(next_element) > 0:
                        next_element[0].click()
                    time.sleep(3)

                self.wait.until(lambda driver: self.driver.find_elements_by_xpath(self.htmlLable))
                elements = self.driver.find_elements_by_xpath(self.htmlLable)

                # find the original URL of each record
                urlList = []
                self.wait.until(lambda driver: self.driver.find_elements_by_xpath(self.originalUrlLabel))
                hrefElements = self.driver.find_elements_by_xpath(self.originalUrlLabel)
                for hrefe in hrefElements:
                    urlList.append(hrefe.get_attribute('href').encode('utf8'))

                index = 0
                strMessage = ' '
                strsplit = '\n------------------------------------------------------------------------------------\n'
                # useful records on the current page
                usefulCount = 0
                meetingList = []
                for element in elements:
                    txt = element.text.encode('utf8')

                    txts = txt.split('\n')

                    # strdate = re.findall(self.datePattern, txt)
                    # a record qualifies if its date is later than today and its title contains the keyword
                    if self.date_isValid(txts[1]) and txts[0].find(kword) > -1:
                        dictM = {'title': txts[0], 'date': txts[1],
                                 'url': urlList[index], 'keyword': kword, 'info': txt}
                        meetingList.append(dictM)

                        # print ''
                        # print txt
                        # print 'activity link: ' + urlList[index]
                        # print strsplit
                        #
                        # strMessage = txt + "\n"
                        # strMessage += 'activity link: ' + urlList[index] + '\n'
                        # strMessage += strsplit
                        # strMessage = unicode(strMessage, 'utf8')
                        # # log.WriteLog(strMessage)
                        usefulCount = usefulCount + 1
                        recordCount = recordCount + 1
                    index = index + 1

                pageIndex = pageIndex + 1
                if usefulCount == 0:
                    break
                else:
                    self.db.SaveMeetings(meetingList)  # save to the database

            print "viewed: %d pages of data" % page_Count
            print "total crawled: %d eligible activity records" % recordCount

        self.driver.close()
        self.driver.quit()

if __name__ == '__main__':

    configfile = os.path.join(os.getcwd(), 'activity.conf')
    cf = IniFile.ConfigFile(configfile)
    webSearchUrl = cf.GetValue("section", "webSearchUrl")
    pageCountLable = cf.GetValue("section", "pageCountLable")
    htmlLable = cf.GetValue("section", "htmlLable")
    originalUrlLabel = cf.GetValue("section", "originalUrlLabel")
    nextUrlLabel = cf.GetValue("section", "nextUrlLabel")

    keywords = cf.GetValue("section", "keywords")
    keywordlist = keywords.split(';')
    start = time.clock()
    db = mongoDbBase.mongoDbBase()
    for keyword in keywordlist:
        if len(keyword) > 0:
            url = webSearchUrl + urllib.quote(keyword)
            t = scrapyDataThread(url, pageCountLable, htmlLable, originalUrlLabel, nextUrlLabel, keyword, db)
            t.setDaemon(True)
            t.start()
            t.join()  # join immediately, so keywords are crawled one at a time

    end = time.clock()
    print "entire process took: %f seconds" % (end - start)

  
Configuration file (activity.conf) contents:

[section]
# path to the IE driver (unused here, since PhantomJS is the driver)
iedriverserver = C:\Program Files\Internet Explorer\IEDriverServer.exe

# tags to scrape; if there are several, separate them with semicolons
htmlLable = //div[@id='eventList']/div[@class='list']

# tag from which the crawler reads the number of search results
pageCountLable = //span[@id='eventNumber']

# search home page URL of the given site
webSearchUrl = http://www.huodongshu.com/html/find_search.html?search_keyword=

# tag of the original URL of each record
originalUrlLabel = //div[@class='listr']/h2/a

# tag of the "next page" link
nextUrlLabel = //dt[@class='next']/a

# keywords to type into the search box
keywords = Internet TV; smart TV; digital; av; home entertainment; programs; audiovisual; copyright; data
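
For reference, each semicolon-separated keyword from this file is URL-encoded and appended to webSearchUrl before its thread starts. A quick sketch of what that produces (note the .strip() added here: the main script does not strip, so a keyword written with a space after the semicolon would be encoded with a leading %20):

# coding=utf-8
import urllib

webSearchUrl = 'http://www.huodongshu.com/html/find_search.html?search_keyword='
keywords = 'Internet TV; smart TV; digital'
for keyword in keywords.split(';'):
    keyword = keyword.strip()  # not done in the main script
    if len(keyword) > 0:
        print webSearchUrl + urllib.quote(keyword)
# http://www.huodongshu.com/html/find_search.html?search_keyword=Internet%20TV
# http://www.huodongshu.com/html/find_search.html?search_keyword=smart%20TV
# http://www.huodongshu.com/html/find_search.html?search_keyword=digital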

