Python ships with the built-in urllib and urllib2 modules, and the third-party requests library can also be used to fetch web pages. Here we use the easy_install package manager to install the requests and BeautifulSoup libraries: at the CMD prompt, switch to the directory containing easy_install and run easy_install followed by the package name.
easy_install requests
easy_install BeautifulSoup
Once the packages are installed, we can choose among urllib, urllib2, and requests to fetch web pages.
1. Fetching web page content
#! /usr/bin/env python
# coding:utf-8
import urllib
import urllib2
import requests
import sys

url = 'http://www.csdn.net'

def urllib2Test():
    # Fetch the page with urllib2
    req = urllib2.Request(url)
    response = urllib2.urlopen(req)
    thePage = response.read()

def requestsTest():
    # Fetch the page with requests; the response exposes the
    # status code, the body, and the headers
    r = requests.get(url)
    r.status_code
    r.content
    r.headers

def urllib2TestEx(url):
    # URLError covers network-level failures (DNS, refused connection, ...)
    req = urllib2.Request(url)
    try:
        response = urllib2.urlopen(req)
        content = response.read()
    except urllib2.URLError as e:
        print e.reason

def urlhttperror(url):
    # HTTPError is raised for HTTP error statuses and carries the error page
    req = urllib2.Request(url)
    try:
        urllib2.urlopen(req)
    except urllib2.HTTPError as e:
        print e.read()

if __name__ == '__main__':
    urllib2Test()
    requestsTest()
    urllib2TestEx(url)
    urlhttperror(url)
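The script above imports urllib but never calls it. As a minimal sketch of that third option: plain urllib's urlopen takes the URL string directly rather than a Request object.

#! /usr/bin/env python
# coding:utf-8
import urllib

# urllib.urlopen returns a file-like object for the response
response = urllib.urlopen('http://www.csdn.net')
print response.getcode()   # HTTP status code, e.g. 200
thePage = response.read()  # the page body as a byte string
response.close()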
2. Disguising the crawler as a browser
For sites that block crawlers, you can disguise the request as a browser visit by adding User-Agent information to the request. Add or modify the headers like this:

headers = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'}
req = urllib2.Request("http://blog.csdn.net/nevasun", headers=headers)
response = urllib2.urlopen(req)
A complete browser-disguised fetch, using requests, looks like this:
#! /usr/bin/env python
# coding:utf-8
import requests
from BeautifulSoup import BeautifulSoup
from os.path import dirname, abspath
import sys
import os
import time

#PREFIX = dirname(abspath(__file__))

## This block works around encoding errors with Chinese text
reload(sys)
sys.setdefaultencoding("utf8")
####################################################

defaultWaitTime = 1

def getHtmlContent(url):
    global defaultWaitTime
    content = None
    retry = 0
    # Pretend to be a desktop Chrome browser
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.57 Safari/537.36'}
    AccessFrequency = defaultWaitTime
    # Retry up to 5 times, sleeping between attempts
    while retry < 5:
        try:
            r = requests.get(url, timeout=10, headers=headers)
            content = r.content
            return content
        except:
            retry += 1
            time.sleep(AccessFrequency)
    return content

def Test():
    content = getHtmlContent("http://blog.csdn.net/figo829/article/details/18015537")
    #print content

if __name__ == '__main__':
    Test()
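BeautifulSoup is installed and imported above but not yet used. As a minimal sketch of putting it to work, the following hypothetical parseTest function feeds the downloaded HTML into the BeautifulSoup 3 API (matching the from BeautifulSoup import BeautifulSoup style above) and extracts the title and links; the tag names are just illustrative.

def parseTest():
    # Parse the downloaded page and pull out the title and all links
    content = getHtmlContent("http://blog.csdn.net/figo829/article/details/18015537")
    if content is not None:
        soup = BeautifulSoup(content)
        print soup.title.string          # text of the <title> tag
        for link in soup.findAll('a'):   # every <a> tag in the page
            print link.get('href')

Note that getHtmlContent returns None after five failed attempts, so the None check keeps the parser from crashing on an unreachable page; the fixed one-second sleep between retries could also be grown into an exponential backoff if the target site throttles aggressively.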