Python 爬取指定 URL 的 ICP 備案資訊

來源:互聯網
上載者:User
# -*- coding: utf-8 -*-
"""Scrape ICP filing (備案) information for URLs read from stdin.

Queries http://icp.alexa.cn/ for each URL and writes one tab-separated
record per line to ``out.txt``.

Reconstructed from a garbled copy: the original HTML-tag regex literals
were truncated by the web extraction, so the ``<table>``/``<tr>``/``<td>``
patterns below are a best-effort reconstruction -- TODO confirm against
the live page markup.
"""
import re
import sys
import time
import urllib.request


def perror_and_exit(message, status=-1):
    """Write *message* to stderr and terminate the process with *status*."""
    sys.stderr.write(message + "\n")
    sys.exit(status)


def get_text_from_html_tag(html):
    """Return the text between the first ``>`` ... ``</`` pair in *html*.

    E.g. ``"<td>hello</td>"`` -> ``"hello"``.
    """
    pattern_text = re.compile(r">.*?</", re.DOTALL)
    # [1:-2] strips the leading '>' and the trailing '</' of the match.
    return pattern_text.findall(html)[0][1:-2].strip()


def parse_alexa(url):
    """Fetch and parse the ICP record page for *url*.

    Returns ``[url, company_name, company_properties, company_icp,
    company_website_name, company_website_home_page, company_detail_url]``.
    Exits via perror_and_exit() when the page cannot be fetched or its
    layout is unexpected.  Retries transient failures with capped
    exponential backoff (the original looped up to 5000 times with an
    uncapped 2**n sleep, which grows astronomically).
    """
    url_alexa = "http://icp.alexa.cn/index.php?q=%s" % url
    print(url_alexa)

    page = None
    for attempt in range(10):  # bounded retry count
        try:
            page = urllib.request.urlopen(url_alexa).read().decode(
                "utf-8", "replace")
            break
        except Exception as exc:  # network/HTTP errors; never a bare except
            delay = min(2 ** attempt, 60)  # cap the backoff at 60 seconds
            print("try %s times:sleep %s seconds (%s)" % (attempt, delay, exc))
            time.sleep(delay)
    if page is None:
        perror_and_exit("could not fetch %s" % url_alexa)

    # NOTE(review): tag patterns reconstructed from context -- the original
    # literals were truncated by extraction; verify against the real page.
    match_table = re.search(r"<table.*?</table>", page, re.DOTALL)
    if not match_table:
        perror_and_exit("no result table in page")
    table = match_table.group()

    match_tr = re.findall(r"<tr.*?</tr>", table, re.DOTALL)
    if len(match_tr) != 2:
        perror_and_exit("table format is incorrect")

    # Second row holds the data; first is assumed to be the header row.
    icp_tr = match_tr[1]
    match_td = re.findall(r"<td.*?</td>", icp_tr, re.DOTALL)

    company_name = get_text_from_html_tag(match_td[1])
    company_properties = get_text_from_html_tag(match_td[2])
    company_icp = get_text_from_html_tag(match_td[3])
    # The ICP cell nests another tag; keep only the text after its '>'.
    company_icp = company_icp[company_icp.find(">") + 1:]
    company_website_name = get_text_from_html_tag(match_td[4])
    company_website_home_page = get_text_from_html_tag(match_td[5])
    company_website_home_page = company_website_home_page[
        company_website_home_page.rfind(">") + 1:]
    company_detail_url = get_text_from_html_tag(match_td[7])
    match_href = re.findall(r'href=".*?"', company_detail_url, re.DOTALL)
    if match_href:
        company_detail_url = match_href[0][len('href="'):-1]
    else:
        company_detail_url = ""

    return [url, company_name, company_properties, company_icp,
            company_website_name, company_website_home_page,
            company_detail_url]


if __name__ == "__main__":
    # 'with' guarantees out.txt is closed (the original used file() and
    # never closed it).  strip() removes the trailing newline that the
    # original embedded into both the query URL and the TSV output.
    with open("out.txt", "w") as fw:
        for line in sys.stdin:
            fw.write("\t".join(parse_alexa(line.strip())) + "\n")

每次抓取失敗後會按指數退避 sleep(文末簡化片段則固定 sleep 2s),以防止 IP 被封;實際上即使 sleep 了,IP 過一段時間還是會被封。

由於是結構化抓取,當網站格式變化此程式將無法使用

  • 聯繫我們

    該頁面正文內容均來源於網絡整理,並不代表阿里雲官方的觀點,該頁面所提到的產品和服務也與阿里云無關,如果該頁面內容對您造成了困擾,歡迎寫郵件給我們,收到郵件我們將在5個工作日內處理。

    如果您發現本社區中有涉嫌抄襲的內容,歡迎發送郵件至: info-contact@alibabacloud.com 進行舉報並提供相關證據,工作人員會在 5 個工作天內聯絡您,一經查實,本站將立刻刪除涉嫌侵權內容。

    A Free Trial That Lets You Build Big!

    Start building with 50+ products and up to 12 months usage for Elastic Compute Service

    • Sales Support

      1 on 1 presale consultation

    • After-Sales Support

      24/7 Technical Support 6 Free Tickets per Quarter Faster Response

    • Alibaba Cloud offers highly flexible support services tailored to meet your exact needs.