python多線程抓取網頁資訊

來源:互聯網
上載者:User

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Multi-threaded scraper for apk.gfan.com app listing pages (Python 2).
# Listing pages are crawled in main(); detail-page URLs go into `queue`,
# ThreadUrl workers download them into `out_queue`, and DatamineThread
# workers parse the HTML, insert app metadata into MySQL and save
# screenshots under picture/.
import urllib
import urllib2
import random
import requests
import os,sys
import Queue
import threading
import time
import MySQLdb
from sgmllib import SGMLParser
import re

# URLs of app detail pages waiting to be downloaded.
queue = Queue.Queue()
# Raw HTML of downloaded pages waiting to be parsed.
out_queue = Queue.Queue()
# Running counter used to name downloaded screenshot files.
num = 0

class ThreadUrl(threading.Thread):
    
    def __init__(self, queue, out_queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self.out_queue = out_queue

    def run(self):
        while True:
            
host = self.queue.get()
print host
try:
html=requests.get(host)

result=html.content
html.close()
self.out_queue.put(result)

            #place chunk into out queue
except:
print time.sleep(5)

            #signals to queue job is done
self.queue.task_done()

class DatamineThread(threading.Thread):
   
    def __init__(self, out_queue):
        threading.Thread.__init__(self)
        self.out_queue = out_queue

    def run(self):
        while True:
            
result = self.out_queue.get()
pattern=re.compile('<div class="appdiscrib">[\s\S]*?<h4>(.+?)</h4>')
data0=re.findall(pattern,result)

pattern=re.compile('版 本 號(.+?)</li>')
data1=re.findall(pattern,result)
pattern=re.compile('開 發 者(.+?)</li>')
data2=re.findall(pattern,result)
pattern=re.compile('發布時間(.+?)</li>')
data3=re.findall(pattern,result)
pattern=re.compile('檔案大小(.+?)</li>')
data4=re.findall(pattern,result)
pattern=re.compile('支援韌體(.+?)</li>')
data5=re.findall(pattern,result)
pattern=re.compile('應用介紹</h3>[\s\S]*?<div class="intro">([\s\S]*?)</div>')
data6=re.findall(pattern,result)
for items in data6:
pass#print re.sub('<br />',' ',items)
sql="insert into address(name,version,developer,pubtime,filesize,support,introduction) values(%s,%s,%s,%s,%s,%s,%s)"
for items in data6:

if(data5):
values=(data0[0],data1[0],data2[0],data3[0],data4[0],data5[0],re.sub('<br />',' ',items))
else:
values=(data0[0],data1[0],data2[0],data3[0],data4[0],'NULL',re.sub('<br />',' ',items))
#print values
#print sql % values

try:

conn=MySQLdb.connect(host='localhost',user='root',passwd='123456',db='addressbookdb',charset="utf8")
cursor=conn.cursor() 
cursor.execute(sql,values)
conn.commit()
except:
print "error2"


try:
cursor.close()
conn.close()
except:
print "error3"

pattern=re.compile(' <div class="appTitle clearfix">[\s\S]*?<img src=(.+?)/>')
data=re.findall(pattern,result)
for j in data:
print j
global num
      
try:
temp=requests.get(j[1:-2])
f=file("picture/"+str(num),"w+")
num=num+1
print num
f.write(temp.content)
except:
print "error4"
 
           
self.out_queue.task_done()
def main():

for k in range(1,2539):
print k


try:
url="http://apk.gfan.com/apps_7_1_"+str(k)+".html"

html=requests.get(url)

result=html.content
html.close()
pattern=re.compile('<a href="([http://apk.gfan.com]?/Product/App\d{1,8}.html)"')
dataresult=re.findall(pattern,result)
dataresult=list(set(dataresult))


for a in range(20):
w = ThreadUrl(queue, out_queue)
w.setDaemon(True)
w.start()
for i in dataresult:
host="http://apk.gfan.com"+i

queue.put(host)
for a in range(20):
dt = DatamineThread(out_queue)
dt.setDaemon(True)
dt.start()
except:
time.sleep(5)


queue.join()
out_queue.join()





#sql="select * from address"
#cursor.execute(sql)
#conn.commit()
#finalresult=cursor.fetchall()
#if finalresult:
#for x in finalresult:
#pass #print x[0:]


    
# Script entry point: run the crawler when executed directly.
if __name__ == "__main__":
    main()

相關文章

聯繫我們

該頁面正文內容均來源於網絡整理,並不代表阿里雲官方的觀點,該頁面所提到的產品和服務也與阿里云無關,如果該頁面內容對您造成了困擾,歡迎寫郵件給我們,收到郵件我們將在5個工作日內處理。

如果您發現本社區中有涉嫌抄襲的內容,歡迎發送郵件至: info-contact@alibabacloud.com 進行舉報並提供相關證據,工作人員會在 5 個工作天內聯絡您,一經查實,本站將立刻刪除涉嫌侵權內容。

A Free Trial That Lets You Build Big!

Start building with 50+ products and up to 12 months usage for Elastic Compute Service

  • Sales Support

    1 on 1 presale consultation

  • After-Sales Support

    24/7 Technical Support 6 Free Tickets per Quarter Faster Response

  • Alibaba Cloud offers highly flexible support services tailored to meet your exact needs.