1. JavaScript "encryption" is the most annoying part :-(
1). Eval of an immediately-invoked function that doesn't depend on any external variables? How naïve. Watch me kill it with Node.js! (A sketch follows this list.)
2). When a request has to pass the site's checks, try the Referer header first; cookies aren't as important as you think. (Also sketched after this list.)
3). curl and the other command-line tools are very handy for slicing up text.
4). But the Python version is only a few lines longer anyway.
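For 1), here is a minimal sketch of the Node.js trick. It is not from the original post: it assumes the scraped blob really is a self-contained immediately-invoked function, that the node binary is on $PATH, and the variable name iife_source is just a placeholder for wherever you stored the blob.

import subprocess

def run_js(iife_source):
  # Hand the self-contained IIFE to Node.js and capture whatever it
  # evaluates to. Works only because the blob touches no browser APIs.
  out = subprocess.run(['node', '-e', 'console.log(%s)' % iife_source],
                       capture_output=True, text=True, check=True)
  return out.stdout.rstrip('\n')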
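For 2), the real request is in gather_content below; the point is that a bare Referer header, with no cookie jar and no login, is all the site checks. A trimmed illustration, with a placeholder endpoint and placeholder field values:

import requests

# No cookies, no session state: just claim to come from the chapter page.
r = requests.post('http://example.com/show.php',
                  data={'chapterid': '2098547'},
                  headers={'Referer': 'http://example.com/Html/2747/2098547.html'})
print(r.status_code)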
2. requests is much more efficient than letting lxml fetch the pages itself.
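A sketch of the comparison I read this as (the book-page URL is a guess built from the book id that appears in the curl comment further down):

import requests
from lxml.html import fromstring, parse

# lxml fetching by itself: a one-shot fetch per call, no connection
# reuse, and the encoding is whatever the parser guesses.
doc = parse('http://www.feisuzw.com/Html/2747/').getroot()

# requests doing the fetching: a keep-alive Session, an explicit
# gb18030 encoding, and only the parsing left to lxml.
session = requests.Session()
r = session.get('http://www.feisuzw.com/Html/2747/')
r.encoding = 'gb18030'
doc = fromstring(r.text)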
3. progressbar is too fancy; I'll just write my own ...
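The homemade version presumably ends up as the foreach imported from termutils below. A sketch that mimics the interface the main code relies on (the callback receives the index and the item, and returns a title to show); the real implementation lives in winterpy:

import sys

def foreach(items, cb, start=0):
  # Poor man's progress display: call cb(i, item) for each item and
  # overwrite a single status line on stderr, no progressbar dependency.
  total = len(items)
  for i in range(start, total):
    title = cb(i, items[i])
    sys.stderr.write('\r%d/%d %s' % (i + 1, total, title))
    sys.stderr.flush()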
4. argparse is a must when writing Python command-line programs!
5. string.Template works well too.
6. The main code follows. Apart from the standard library plus lxml and requests, any modules you don't have are in the almighty winterpy repository. Actually, the main code is in there too.
#!/usr/bin/env python3
# vim:fileencoding=utf-8

import sys
from functools import partial
from string import Template
import argparse
import base64
from urllib.parse import unquote

from lxml.html import fromstring
import requests

from htmlutils import extractText
from termutils import foreach

session = requests.Session()

def main(index, filename='$name-$author.txt', start=0):
  r = session.get(index)
  r.encoding = 'gb18030'
  doc = fromstring(r.text, base_url=index)
  doc.make_links_absolute()

  name = doc.xpath('//div[@class="info"]/p[1]/a/text()')[0]
  author = doc.xpath('//div[@class="info"]/p[1]/span/text()')[0].split()[-1]
  nametmpl = Template(filename)
  fname = nametmpl.substitute(name=name, author=author)
  with open(fname, 'w') as f:
    sys.stderr.write('Downloading to file %s.\n' % fname)
    links = doc.xpath('//div[@class="chapterlist"]/ul/li/a')
    try:
      foreach(links, partial(gather_content, f.write), start=start)
    except KeyboardInterrupt:
      sys.stderr.write('\n')
      sys.exit(130)

  sys.stderr.write('\n')
  return True

def gather_content(write, i, l):
  # equivalent shell pipeline:
  # curl -XPOST -F bookid=2747 -F chapterid=2098547 'http://www.feisuzw.com/skin/hongxiu/include/fe1sushow.php' \
  #   --referer http://www.feisuzw.com/Html/2747/2098547.html \
  #   | tail +4 | base64 -d | sed 's/&#&/\\u/g' | ascii2uni -qaF | ascii2uni -qaJ
  # which leaves <p>...</p> paragraphs
  url = l.get('href')
  _, _, _, _, bookid, chapterid = url.split('/')
  chapterid = chapterid.split('.', 1)[0]
  r = session.post('http://www.feisuzw.com/skin/hongxiu/include/fe1sushow.php',
                   data={'bookid': bookid, 'chapterid': chapterid},
                   headers={'Referer': url})
  text = r.content[3:]  # strip BOM
  text = base64.decodebytes(text).replace(b'&#&', br'\u')
  text = text.decode('unicode_escape')
  text = unquote(text)
  text = text.replace('<p>', '').replace('</p>', '\n')

  title = l.text
  write(title)
  write('\n')
  write(text)
  write('\n')
  return title

if __name__ == '__main__':
  parser = argparse.ArgumentParser(description='download novels from feisuzw.com')
  parser.add_argument('url',
                      help='link to the novel homepage')
  parser.add_argument('name', default='$name-$author.txt', nargs='?',
                      help='filename template to save to (supports $name and $author)')
  parser.add_argument('-s', '--start', default=1, type=int, metavar='N',
                      help='chapter position to start downloading from (starting at 1)')
  args = parser.parse_args()
  main(args.url, args.name, args.start - 1)
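Assuming the script is saved as getnovel.py (a name I made up), an invocation would look like this; the book id 2747 comes from the curl comment above, and -s 10 resumes from chapter 10:

./getnovel.py http://www.feisuzw.com/Html/2747/ -s 10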