[Python crawler] Nine: Selenium + PhantomJS crawling conference events on Huodongxing (single-threaded crawl)


The idea: given a series of keywords (Internet TV, smart TV, digital, audio-visual, home entertainment, program, audiovisual, copyright, data, etc.), enter each keyword into the text input box on the Huodongxing search page (http://www.huodongxing.com/search?city=%E5%85%A8%E5%9B%BD&pi=1, where the city parameter is the URL-encoded "全国", i.e. nationwide) and extract the required data from the search results.

First, Selenium with the IE driver is used to obtain each keyword's search-results URL and the total number of result pages, which are saved in a list (this step comes first because every keyword yields a different results URL). Then the list is looped over with Selenium + PhantomJS, fetching the data behind each keyword's URL.
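For reference, the per-page URL is derived from the first results page: the pi=1 page parameter is replaced with a bare pi=, and the page index is appended while paging. A minimal sketch of that scheme (build_page_urls is a hypothetical helper name of mine, not part of the crawler below):

# Sketch of the paging scheme used by the crawler below.
# build_page_urls is a hypothetical helper, not from the original code.
def build_page_urls(first_page_url, page_count):
    base = first_page_url.replace('pi=1', 'pi=')  # strip the page number
    # The crawler below starts counting at 0
    return [base + str(i) for i in range(page_count)]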


The specific code is as follows:

  

# coding=utf-8
import os
import re
import time
from selenium import webdriver
import selenium.webdriver.support.ui as ui
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.select import Select
import IniFile
import LogFile


class Huodongxing:

    def __init__(self):
        # Get the IEDriverServer.exe path from the config file
        configfile = os.path.join(os.getcwd(), 'meetingconfig.conf')
        self.cf = IniFile.ConfigFile(configfile)
        IEDriverServer = self.cf.GetValue("section", "IEDriverServer")
        # Delay after fetching each page, in seconds (default 5)
        self.pageDelay = 5
        pageInteralDelay = self.cf.GetValue("section", "pageInteralDelay")
        if pageInteralDelay:
            self.pageDelay = int(pageInteralDelay)

        os.environ["webdriver.ie.driver"] = IEDriverServer
        self.urldriver = webdriver.Ie(IEDriverServer)
        # self.driver = webdriver.PhantomJS()
        self.wait = ui.WebDriverWait(self.urldriver, 20)
        self.urldriver.maximize_window()

    def compareDate(self, dateLeft, dateRight):
        '''
        Compare two dates.
        :param dateLeft: date in the format 2017-03-04
        :param dateRight: date in the format 2017-03-04
        :return: 1 if left is later, 0 if equal, -1 if left is earlier
        '''
        # Note: since both dates are zero-padded ISO strings, a plain
        # string comparison would give the same ordering.
        dls = dateLeft.split('-')
        drs = dateRight.split('-')
        if len(dls) > len(drs):
            return 1
        if int(dls[0]) == int(drs[0]) and int(dls[1]) == int(drs[1]) and int(dls[2]) == int(drs[2]):
            return 0
        if int(dls[0]) > int(drs[0]):
            return 1
        elif int(dls[0]) == int(drs[0]) and int(dls[1]) > int(drs[1]):
            return 1
        elif int(dls[0]) == int(drs[0]) and int(dls[1]) == int(drs[1]) and int(dls[2]) > int(drs[2]):
            return 1
        return -1

    def scroll_top(self):
        '''
        Scroll to the top of the page.
        :return:
        '''
        if self.urldriver.name == "chrome":
            js = "var q=document.body.scrollTop=0"
        else:
            js = "var q=document.documentElement.scrollTop=0"
        return self.urldriver.execute_script(js)

    def scroll_foot(self):
        '''
        Scroll to the bottom of the page.
        :return:
        '''
        if self.urldriver.name == "chrome":
            js = "var q=document.body.scrollTop=10000"
        else:
            js = "var q=document.documentElement.scrollTop=10000"
        return self.urldriver.execute_script(js)

    def get_UrlPageCountList(self, webSearchUrl, keywordList):
        '''
        For each keyword in the list, get the search-results URL and the
        corresponding number of result pages.
        :param webSearchUrl: the search home page URL
        :param keywordList: list of keywords
        :return: list of URLs, each with its page count appended
        '''
        search_url_pagecount_list = []
        # firstUrl = self.cf.GetValue("section", "webSearchUrl")
        self.urldriver.get(webSearchUrl)
        # self.urldriver.implicitly_wait(3)
        time.sleep(3)
        pageCountLable = self.cf.GetValue("section", "pageCountLable")
        for keyword in keywordList:
            if len(keyword) > 0:
                # Put the keyword into the search box via JavaScript
                js = "var obj = document.getElementById('mainSearchTextbox');obj.value='" + keyword + "';"
                print 'keyword: %s' % keyword
                self.urldriver.execute_script(js)
                # Press Enter in the search box to trigger the search
                ss_elements = self.urldriver.find_element_by_id("mainSearchTextbox")
                ss_elements.send_keys(Keys.RETURN)
                time.sleep(5)
                current_url = self.urldriver.current_url.replace('pi=1', 'pi=')
                try:
                    elements = self.urldriver.find_elements_by_xpath(pageCountLable)
                    # Total record count; ten records per result page
                    strCount = elements[0].text.encode('utf8')
                    pageCount = int(strCount) / 10
                    if int(strCount) % 10 > 0:
                        pageCount = pageCount + 1
                    search_url_pagecount_list.append(current_url + '_' + str(pageCount))
                except Exception, e:
                    print e.message

        # Phase one is done: close IE and switch to PhantomJS for the crawl
        self.urldriver.close()
        self.urldriver.quit()
        self.driver = webdriver.PhantomJS()
        self.wait = ui.WebDriverWait(self.driver, 20)
        self.driver.maximize_window()
        return search_url_pagecount_list

    def scrapy_data(self):
        '''Crawl the data'''
        start = time.clock()

        webSearchUrl = self.cf.GetValue("section", "webSearchUrl")
        keyword = self.cf.GetValue("section", "keywords")
        keywordList = keyword.split(';')
        # Search pages and their corresponding page counts
        search_url_pagecount_list = self.get_UrlPageCountList(webSearchUrl, keywordList)

        if len(search_url_pagecount_list) > 0:
            htmlLable = self.cf.GetValue("section", "htmlLable")
            logFile = os.path.join(os.getcwd(), r'log.txt')
            log = LogFile.LogFile(logFile)

            OriginalUrlLabel = self.cf.GetValue("section", "OriginalUrlLabel")
            currentDate = time.strftime('%Y-%m-%d')
            datePattern = re.compile(r'\d{4}-\d{2}-\d{2}')
            keyword_index = 0
            for url_pagecount in search_url_pagecount_list:
                try:
                    kword = keywordList[keyword_index]
                    print ''
                    print 'keyword: %s' % kword
                    pageCount = int(url_pagecount.split('_')[1])
                    page_count = pageCount
                    recordCount = 0
                    if pageCount > 0:
                        current_url = url_pagecount.split('_')[0]
                        pageIndex = 0
                        while pageCount > 0:
                            url = current_url + str(pageIndex)
                            self.driver.get(url)

                            # Delay 3 seconds per page
                            time.sleep(3)
                            # self.driver.implicitly_wait(3)
                            pageCount = pageCount - 1
                            self.wait.until(lambda driver: self.driver.find_elements_by_xpath(htmlLable))
                            elements = self.driver.find_elements_by_xpath(htmlLable)

                            # Collect the detail-page URL of each event
                            urlList = []
                            self.wait.until(lambda driver: self.driver.find_elements_by_xpath(OriginalUrlLabel))
                            hrefElements = self.driver.find_elements_by_xpath(OriginalUrlLabel)
                            for hrefe in hrefElements:
                                urlList.append(hrefe.get_attribute('href').encode('utf8'))

                            # self.driver.implicitly_wait(2)
                            strsplit = '\n------------------------------------------------------------------------------------\n'
                            index = 0
                            # Number of useful records on this page
                            usefulCount = 0
                            for element in elements:
                                txt = element.text.encode('utf8')
                                txts = txt.split('\n')
                                strDate = re.findall(datePattern, txt)
                                # A record qualifies if its date is later than today
                                # and its title contains the search keyword
                                if len(strDate) > 0 and self.compareDate(strDate[0], currentDate) == 1 and \
                                        txts[0].find(kword) > -1:
                                    print ''
                                    print txt
                                    print 'Activity link: ' + urlList[index]
                                    print strsplit

                                    strMessage = txt + "\n"
                                    strMessage += 'Activity link: ' + urlList[index] + '\n'
                                    strMessage += strsplit
                                    strMessage = unicode(strMessage, 'utf8')
                                    log.WriteLog(strMessage)
                                    usefulCount = usefulCount + 1
                                    recordCount = recordCount + 1
                                index = index + 1

                            pageIndex = pageIndex + 1
                            # Stop paging once a page yields no qualifying record
                            if usefulCount == 0:
                                break

                    print "Viewed: %d pages of data" % page_count
                    print "Total crawled: %d qualifying activity records" % recordCount
                except Exception, e:
                    print e.message
                keyword_index = keyword_index + 1

        self.driver.close()
        self.driver.quit()
        end = time.clock()
        print "Entire process took: %f seconds" % (end - start)


# #测试抓取数据
obj = huodongxing ()
Obj.scrapy_data ()
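
The code imports two helper modules, IniFile and LogFile, that the post does not include. Below is a minimal sketch of what they would need to provide, inferred from the calls above (IniFile.ConfigFile(...).GetValue(section, key) and LogFile.LogFile(...).WriteLog(message)); these stand-ins are assumptions, not the author's originals:

# coding=utf-8
# Minimal stand-ins for the helper modules imported above; assumptions
# inferred from how the crawler calls them, not the author's originals.

# IniFile.py
import ConfigParser

class ConfigFile:
    '''Exposes GetValue(section, key) over a .conf file.'''
    def __init__(self, path):
        # RawConfigParser: the webSearchUrl value contains '%', which
        # would break ConfigParser's value interpolation
        self.parser = ConfigParser.RawConfigParser()
        self.parser.read(path)

    def GetValue(self, section, key):
        try:
            return self.parser.get(section, key)
        except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            return ''

# LogFile.py
import codecs

class LogFile:
    '''Appends unicode messages to a UTF-8 log file via WriteLog(message).'''
    def __init__(self, path):
        self.path = path

    def WriteLog(self, message):
        with codecs.open(self.path, 'a', 'utf-8') as f:
            f.write(message)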

Configuration file Contents:
[section]
# Path to the IE driver
IEDriverServer = C:\Program Files\Internet Explorer\IEDriverServer.exe

pageInteralDelay = 5

# XPath of the result entries to scrape; if there are several, separate them with semicolons
htmlLable = //ul[@class='event-horizontal-list-new']/li

# XPath of the element holding the total record count
pageCountLable = //span[@class='text-primary']

# Search home page of the given site
webSearchUrl = http://www.huodongxing.com/search?city=%E5%85%A8%E5%9B%BD&pi=1

# XPath of each result's original (detail-page) URL
OriginalUrlLabel = //ul[@class='event-horizontal-list-new']/li/h3/a

# Keywords to enter in the search box
keywords = Internet TV;smart TV;digital;audio-visual;home entertainment;program;audiovisual;copyright;data
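
To sanity-check the two XPath selectors before a full run, they can be tried directly against the search page. A quick sketch, assuming PhantomJS is on the PATH (the selectors are copied from the config above):

# coding=utf-8
# Quick check that the configured XPaths still match the live page.
from selenium import webdriver

driver = webdriver.PhantomJS()
driver.get('http://www.huodongxing.com/search?city=%E5%85%A8%E5%9B%BD&pi=1')
# Result entries matched by htmlLable
print len(driver.find_elements_by_xpath("//ul[@class='event-horizontal-list-new']/li"))
# Record-count element matched by pageCountLable
print [e.text for e in driver.find_elements_by_xpath("//span[@class='text-primary']")]
driver.quit()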


For each qualifying event, the crawled record is the result entry's text (title and date) plus the event's detail-page link; the original post illustrated the result layout with a screenshot that is not reproduced here.