Capture torrent seeds with the urllib2 and re modules
Approach
1. Log in to the forum (if a login is required to access the section; see the login sketch after this list)
2. Visit the target section
3. Traverse the listing pages (fetch one page at a time and collect the URLs of all posts on it)
4. Visit each post URL and extract a torrent download address from the page source (via a regular expression or a third-party HTML parsing library)
5. Visit the torrent page and download the .torrent file
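Step 1 (logging in) is not actually implemented in the script below, even though cookielib is imported for it. If the section does require a login, a minimal sketch could look like the following; the login URL and form field names here are assumptions, so adjust them to the forum's real login form:

import urllib
import urllib2
import cookielib

# keep cookies in a jar so the session survives across requests
cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)

# hypothetical endpoint and field names; replace with the forum's actual login form
loginData = urllib.urlencode({'username': 'me', 'password': 'secret'})
opener.open('http://xxx.yyy.zzz/login.php', loginData)
# every later urllib2.urlopen() call now carries the session cookie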
The code is as follows:
import urllib
import urllib2
import cookielib
import re
import sys
import os

# site is the forum address | fid is the section id
site = "http://xxx.yyy.zzz/"
source = "thread0806.php?fid=x&search=&page="

# directory where the downloaded .torrent files are saved
btSave = "./clyzwm/"
if os.path.isdir(btSave):
    print btSave + " exists"
else:
    os.mkdir(btSave)

logFile = "./clyzwm/down.log"
errorFile = "./clyzwm/error.log"
sucFile = "./clyzwm/sucess.log"

headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36',
           'Referer': 'http://xxx.yyy.zzz/'}
def btDown(url, dirPath):
    logger(logFile, "download file : " + url)
    try:
        # pull the "ref" value out of the seed-page URL, e.g. .../xxx.php?ref=<hash>
        btStep1 = re.findall('http://[\w]+\.[\w]+\.[\w]{0,4}/[\w]{2,6}\.php\?[\w]{2,6}=([\w]+)', url, re.I)
        if len(btStep1) > 0:
            ref = btStep1[0]
            downsite = ""
            downData = {}
            if len(ref) > 20:
                # long hash: the download form is on the same site and carries a hidden "reff" token
                downsite = re.findall('http://www\.[\w]+\.[\w]+/', url)[0]
                downsite = downsite + "download.php"
                reff = re.findall('input\stype=\"hidden\"\sname=\"reff\"\svalue=\"([\w=]+)\"',
                                  urllib2.urlopen(url).read(), re.I)[0]
                downData = {'ref': ref, 'reff': reff, 'submit': 'download'}
            else:
                # short hash: POST straight to the downhh.com download gateway
                downsite = "http://www.downhh.com/download.php"
                downData = {'ref': ref, 'rulesubmit': 'download'}
            downData = urllib.urlencode(downData)
            downReq = urllib2.Request(downsite, downData)
            downReq.add_header('User-Agent', headers['User-Agent'])
            downPost = urllib2.urlopen(downReq)
            stream = downPost.read(-1)
            # responses under 1000 bytes are treated as error pages, not torrents
            if len(stream) > 1000:
                downPost.close()
                name = btStep1[0] + ".torrent"
                fw = open(dirPath + name, 'wb')  # binary mode: torrent files are not text
                fw.write(stream)
                fw.close()
                logger(sucFile, url + "\n")
            else:
                logger(errorFile, url + "\n")
    except urllib2.URLError, e:
        print e.reason
def logger(logFile, msg):
    print msg
    fw = open(logFile, 'a')
    fw.write(msg)
    fw.close()
# walk the section's listing pages
for i in range(1, 1000):
    logger(logFile, "\n\n\n@ page " + str(i) + " ...")
    part = site + source + str(i)

    content = urllib2.urlopen(part).read()
    content = content.decode('gbk').encode('utf8')

    # collect the relative post URLs on this listing page
    # NOTE: the original pattern was lost in transcription; this assumes thread
    # links of the form htm_data/<a>/<b>/<id>.html
    pages = re.findall('htm_data/[\d]+/[\d]+/[\d]+\.html', content, re.I)

    for page in pages:
        page = site + page
        pageCode = urllib2.urlopen(page).read()
        # some posts wrap the seed page in a viidii.info jump link
        zzJump = re.findall('http://www\.viidii\.info/\?http://[\w]+/[\w]+\?[\w]{2,6}=[\w]+', pageCode)
        #zzJump = re.findall('http://www\.viidii\.info/\?http://[\w/\?=]*', pageCode)
        if len(zzJump) > 0:
            zzJump = zzJump[0]
            pageCode = urllib2.urlopen(page).read()
            zzPage = re.findall('http://[\w]+\.[\w]+\.[\w]+/link[\w]?\.php\?[\w]{2,6}=[\w]+', pageCode)
            if len(zzPage) > 0:
                zzPage = zzPage[0]
                logger(logFile, "\n- zhongzi page -" + zzPage)
                btDown(zzPage, btSave)
            else:
                logger(logFile, "\n. NOT FOUND .")
        else:
            logger(logFile, "\n... NOT FOUND ...")
            zzPage = re.findall('http://[\w]+\.[\w]+\.[\w]+/link[\w]?\.php\?ref=[\w]+', pageCode)