使用re, urllib, threading 多線程抓取天涯文章內容,設定url為需抓取的天涯文章的第一頁,設定file_name為下載後的檔案名稱
代碼如下:
#coding:utf-8
import urllib
import re
import threading
import os, time
class Down_Tianya(threading.Thread):
    """Worker thread that downloads one page of a Tianya forum post.

    Each thread fetches a single page, extracts post bodies with a regex,
    and stores the result in a shared dict keyed by page number.  Every
    thread writes a distinct key (its own page number), so the threads do
    not clobber each other's entries.

    NOTE(review): this is Python 2 code (print statement, urllib.urlopen).
    """

    def __init__(self, url, num, dt):
        # url: page URL to fetch; num: 1-based page number, used as the
        # dict key; dt: dict shared by all threads to collect results.
        threading.Thread.__init__(self)
        self.url = url
        self.num = num
        self.txt_dict = dt

    def run(self):
        print 'downling from %s' % self.url
        self.down_text()

    def down_text(self):
        """Fetch self.url and store the extracted text under key self.num."""
        html_content = urllib.urlopen(self.url).read()
        # NOTE(review): this pattern looks garbled -- the HTML tags that
        # delimited the capture groups were almost certainly stripped when
        # this listing was published, and the trailing non-greedy (.*?)
        # can only match the empty string.  Reconstruct the pattern from
        # the live page markup before relying on it; it should also be a
        # raw string (it contains \s).
        text_pattern = re.compile('時間:(.*?).*?.*?\s*(.*?)', re.DOTALL)
        text = text_pattern.findall(html_content)
        # findall yields one tuple of capture groups per post; join the
        # groups with blank lines so each post body is visually separated.
        text_join = ['\r\n\r\n\r\n\r\n'.join(item) for item in text]
        self.txt_dict[self.num] = text_join
def page(url):
    """Return the total page count scraped from a post's first page.

    Searches the HTML for the pagination marker '<digits> 下頁'
    ("next page").  Returns that digit count when found, otherwise 1.

    Fixes over the original: a single-page post has no pagination marker,
    so the original fell off the end and returned None, which made the
    caller's range(1, my_page + 1) raise TypeError; and (\d*) may match
    the empty string, which would make int() raise ValueError.  Both
    cases now fall back to 1.
    """
    html_page = urllib.urlopen(url).read()
    page_pattern = re.compile(r'(\d*)\s*下頁')
    page_result = page_pattern.search(html_page)
    if page_result and page_result.group(1):
        return int(page_result.group(1))
    # No pagination marker (or an empty digit group): single-page post.
    return 1
def write_text(dict, fn):
    """Write the scraped pages to file *fn* in page order.

    *dict* maps page number -> list of per-post text chunks for that page.
    (The parameter shadows the builtin ``dict``; name kept for interface
    compatibility.)

    Fixes over the original:
    - the first two .replace() target literals were garbled in the
      published listing (they spanned a raw line break); they are
      reconstructed here as the HTML line-break tags <br> / <br/> and the
      third as &nbsp; -- NOTE(review): confirm against the real page
      markup;
    - the file is now closed even if a write raises (``with``);
    - pages are written in sorted key order instead of assuming keys are
      exactly 1..len(dict), so one missing page no longer raises KeyError
      for every later page.
    """
    with open(fn, 'w+') as tx_file:
        for i in sorted(dict):
            for tx in dict[i]:
                # Convert HTML line breaks to CRLF and drop &nbsp; padding.
                tx = tx.replace('<br>', '\r\n').replace('<br/>', '\r\n').replace('&nbsp;', '')
                # Four blank lines between posts to match the joiner used
                # by the downloader threads.
                tx_file.write(tx.strip() + '\r\n' * 4)
def main():
url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
file_name ='abc.txt'
my_page = page(url)
my_dict = {}
print 'page num is : %s' % my_page
threads = []
"""根據頁數構造urls進行多線程下載"""
for num in range(1, my_page+1):
myurl = '%s%s.shtml' % (url[:-7], num)
downlist = Down_Tianya(myurl, num, my_dict)
downlist.start()
threads.append(downlist)
"""檢查下載完成後再進行寫入"""
for t in threads:
t.join()
write_text(my_dict, file_name)
print 'All download finished. Save file at directory: %s' % os.getcwd()
if __name__ == '__main__':
main()
檔案 down_tianya.py 的完整代碼如下:
#coding:utf-8
import urllib
import re
import threading
import os
class Down_Tianya(threading.Thread):
    """Worker thread that downloads one page of a Tianya forum post.

    Each thread fetches a single page, extracts post bodies with a regex,
    and stores the result in a shared dict keyed by page number.  Every
    thread writes a distinct key (its own page number), so the threads do
    not clobber each other's entries.

    NOTE(review): this is Python 2 code (print statement, urllib.urlopen).
    """

    def __init__(self, url, num, dt):
        # url: page URL to fetch; num: 1-based page number, used as the
        # dict key; dt: dict shared by all threads to collect results.
        threading.Thread.__init__(self)
        self.url = url
        self.num = num
        self.txt_dict = dt

    def run(self):
        print 'downling from %s' % self.url
        self.down_text()

    def down_text(self):
        """Fetch self.url and store the extracted text under key self.num."""
        html_content = urllib.urlopen(self.url).read()
        # NOTE(review): this pattern looks garbled -- the HTML tags that
        # delimited the capture groups were almost certainly stripped when
        # this listing was published, and the trailing non-greedy (.*?)
        # can only match the empty string.  Reconstruct the pattern from
        # the live page markup before relying on it; it should also be a
        # raw string (it contains \s).
        text_pattern = re.compile('時間:(.*?).*?.*?\s*(.*?)', re.DOTALL)
        text = text_pattern.findall(html_content)
        # findall yields one tuple of capture groups per post; join the
        # groups with blank lines so each post body is visually separated.
        text_join = ['\r\n\r\n\r\n\r\n'.join(item) for item in text]
        self.txt_dict[self.num] = text_join
def page(url):
    """Return the total page count scraped from a post's first page.

    Searches the HTML for the pagination marker '<digits> 下頁'
    ("next page").  Returns that digit count when found, otherwise 1.

    Fixes over the original: a single-page post has no pagination marker,
    so the original fell off the end and returned None, which made the
    caller's range(1, my_page + 1) raise TypeError; and (\d*) may match
    the empty string, which would make int() raise ValueError.  Both
    cases now fall back to 1.
    """
    html_page = urllib.urlopen(url).read()
    page_pattern = re.compile(r'(\d*)\s*下頁')
    page_result = page_pattern.search(html_page)
    if page_result and page_result.group(1):
        return int(page_result.group(1))
    # No pagination marker (or an empty digit group): single-page post.
    return 1
def write_text(dict, fn):
    """Write the scraped pages to file *fn* in page order.

    *dict* maps page number -> list of per-post text chunks for that page.
    (The parameter shadows the builtin ``dict``; name kept for interface
    compatibility.)

    Fixes over the original:
    - the first two .replace() target literals were garbled in the
      published listing (they spanned a raw line break); they are
      reconstructed here as the HTML line-break tags <br> / <br/> and the
      third as &nbsp; -- NOTE(review): confirm against the real page
      markup;
    - the file is now closed even if a write raises (``with``);
    - pages are written in sorted key order instead of assuming keys are
      exactly 1..len(dict), so one missing page no longer raises KeyError
      for every later page.
    """
    with open(fn, 'w+') as tx_file:
        for i in sorted(dict):
            for tx in dict[i]:
                # Convert HTML line breaks to CRLF and drop &nbsp; padding.
                tx = tx.replace('<br>', '\r\n').replace('<br/>', '\r\n').replace('&nbsp;', '')
                # Four blank lines between posts to match the joiner used
                # by the downloader threads.
                tx_file.write(tx.strip() + '\r\n' * 4)
def main():
url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
file_name ='abc.txt'
my_page = page(url)
my_dict = {}
print 'page num is : %s' % my_page
threads = []
"""根據頁數構造urls進行多線程下載"""
for num in range(1, my_page+1):
myurl = '%s%s.shtml' % (url[:-7], num)
downlist = Down_Tianya(myurl, num, my_dict)
downlist.start()
threads.append(downlist)
"""檢查下載完成後再進行寫入"""
for t in threads:
t.join()
write_text(my_dict, file_name)
print 'All download finished. Save file at directory: %s' % os.getcwd()
if __name__ == '__main__':
main()