# Directed crawler for the Chinese university ranking, built on requests + BeautifulSoup4.
# Also handles the mixed Chinese/English column-alignment problem (see printunivlist).
import requests
from bs4 import BeautifulSoup
import bs4  # imported in addition so bs4.element.Tag is available for tag-type filtering
def gethtmltext(url):
    """Fetch *url* and return the decoded page text, or an error string on failure.

    Parameters:
        url: the page URL to download.
    Returns:
        The response body as text, or the string 'Get Failed.' on any
        network/HTTP error.
    """
    try:
        # 30 s timeout guards against a hung connection (the original left the
        # value blank, which is a SyntaxError).
        r = requests.get(url, timeout=30)
        r.raise_for_status()              # any status other than 200 raises here
        # Guess the encoding from the body rather than the headers, so Chinese
        # pages without a charset header still decode correctly.
        r.encoding = r.apparent_encoding
        return r.text
    except requests.RequestException:     # narrow catch instead of bare except:
        return 'Get Failed.'
def fillunivlist(ulist, html):
    """Parse the ranking page *html* and append [rank, name, score] rows to *ulist*.

    Parameters:
        ulist: list to be filled in place.
        html:  the page source returned by gethtmltext().
    """
    soup = BeautifulSoup(html, 'html.parser')
    # NOTE: soup[0:500] would raise TypeError (unhashable type: 'slice');
    # a BeautifulSoup object is not sliceable.
    for tr in soup.find('tbody').children:
        # .children also yields NavigableStrings (whitespace between rows);
        # isinstance() filters so only real <tr> tags are processed.
        if isinstance(tr, bs4.element.Tag):
            tds = tr('td')  # tr('td') is shorthand for tr.find_all('td')
            ulist.append([tds[0].string, tds[1].string, tds[3].string])


def printunivlist(ulist, num):
    """Pretty-print the first *num* rows of *ulist* as an aligned table.

    chr(12288) is the full-width (ideographic) space; using it as the fill
    character keeps columns aligned when cells contain CJK characters, each
    of which occupies the width of two half-width spaces.

    NOTE: every replacement field must be numbered explicitly ({0}..{3}) --
    mixing automatic and manual field numbering, e.g. '{:^10}\t{1:{3}^10}',
    raises ValueError.
    """
    tplt = '{0:^10}\t{1:{3}^10}\t{2:^10}'
    print(tplt.format('Rank', 'School Name', 'Total Score', chr(12288)))
    for i in range(num):
        u = ulist[i]
        print(tplt.format(u[0], u[1], u[2], chr(12288)))
def main():
    """Fetch the 2016 ranking page, parse it, and print the top rows."""
    uinfo = []
    url = 'http://www.zuihaodaxue.cn/zuihaodaxuepaiming2016.html'
    html = gethtmltext(url)
    fillunivlist(uinfo, html)
    # The original called printunivlist(uinfo) with no count, which is a
    # TypeError since num has no default; the tutorial prints the top 20.
    printunivlist(uinfo, 20)


if __name__ == '__main__':
    main()

# Learning source (Bilibili): "Python Web Crawler and Information Extraction",
# MOOC, Beijing Institute of Technology.
The content of this page is sourced from the Internet and does not represent Alibaba Cloud's opinion;
the products and services mentioned on this page have no relationship with Alibaba Cloud. If the
content of this page is confusing, please write us an email, and we will address the problem
within 5 days of receiving your email.
If you find any instances of plagiarism from the community, please send an email to:
info-contact@alibabacloud.com
and provide relevant evidence. A staff member will contact you within 5 working days.