This article introduces fetching web pages in Python with urllib, urllib2, and httplib, using demo code examples. The demo code is given directly below and carries detailed comments; for more background, see the urllib2 documentation.
I also tried using a proxy, logging in to pull cookies, and following redirects to grab images......
Documentation: http://docs.python.org/library/urllib2.html
Straight to the demo code.
It covers direct fetching, using Request (POST/GET), using a proxy, cookie handling, and redirect processing.
#!/usr/bin/python
# -*- coding: utf-8 -*-
# urllib2_test.py
# author: wklken
# 2012-03-17 wklken@yeah.net

import urllib, urllib2, cookielib, socket

url = "http://www.testurl....."  # change to your own URL

# Simplest method
def use_urllib2():
    try:
        f = urllib2.urlopen(url, timeout=5).read()
        print len(f)
    except urllib2.URLError, e:
        print e.reason

# Use Request
def get_request():
    # A global socket timeout can be set
    socket.setdefaulttimeout(5)
    # Parameters to send [without data the request uses GET; with data it uses POST]
    params = {"wd": "a", "b": "2"}
    # Request headers can be added to identify the client
    i_headers = {"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9.1) Gecko/20090624 Firefox/3.5",
                 "Accept": "text/plain"}
    # Use POST: post some params to the server; if not supported, an exception is thrown
    # req = urllib2.Request(url, data=urllib.urlencode(params), headers=i_headers)
    req = urllib2.Request(url, headers=i_headers)

    # After creating a Request, other headers can still be added; if a key is duplicated, the latter takes effect
    # req.add_header('Accept', 'application/json')
    # The HTTP method can also be overridden
    # req.get_method = lambda: 'PUT'
    try:
        page = urllib2.urlopen(req)
        print len(page.read())
        # GET with a query string
        # url_params = urllib.urlencode({"a": "1", "b": "2"})
        # final_url = url + "?" + url_params
        # print final_url
        # data = urllib2.urlopen(final_url).read()
        # print "Method: get", len(data)
    except urllib2.HTTPError, e:
        print "Error Code:", e.code
    except urllib2.URLError, e:
        print "Error Reason:", e.reason

def use_proxy():
    enable_proxy = False
    proxy_handler = urllib2.ProxyHandler({"http": "http://proxyurlXXXX.com:8080"})
    null_proxy_handler = urllib2.ProxyHandler({})
    if enable_proxy:
        opener = urllib2.build_opener(proxy_handler, urllib2.HTTPHandler)
    else:
        opener = urllib2.build_opener(null_proxy_handler, urllib2.HTTPHandler)
    # Install the opener as urllib2's global opener
    urllib2.install_opener(opener)
    content = urllib2.urlopen(url).read()
    print "proxy len:", len(content)

class NoExceptionCookieProcesser(urllib2.HTTPCookieProcessor):
    # Swallow common HTTP errors so cookies can still be inspected
    def http_error_403(self, req, fp, code, msg, hdrs):
        return fp
    def http_error_400(self, req, fp, code, msg, hdrs):
        return fp
    def http_error_500(self, req, fp, code, msg, hdrs):
        return fp

def hand_cookie():
    cookie = cookielib.CookieJar()
    # cookie_handler = urllib2.HTTPCookieProcessor(cookie)
    # after adding the error-tolerant handler above
    cookie_handler = NoExceptionCookieProcesser(cookie)
    opener = urllib2.build_opener(cookie_handler, urllib2.HTTPHandler)
    url_login = "https://www.yourwebsite/?login"
    params = {"username": "user", "password": "111111"}
    opener.open(url_login, urllib.urlencode(params))
    for item in cookie:
        print item.name, item.value
    # urllib2.install_opener(opener)
    # content = urllib2.urlopen(url).read()
    # print len(content)

# Print the final URL after redirects
def get_request_direct():
    import httplib
    httplib.HTTPConnection.debuglevel = 1
    request = urllib2.Request("http://www.google.com")
    request.add_header("Accept", "text/html,*/*")
    request.add_header("Connection", "Keep-Alive")
    opener = urllib2.build_opener()
    f = opener.open(request)
    print f.url
    print f.headers.dict
    print len(f.read())

if __name__ == "__main__":
    use_urllib2()
    get_request()
    get_request_direct()
    use_proxy()
    hand_cookie()
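The demo above only touches httplib to turn on connection-level debugging. Since the title also mentions httplib, here is a minimal sketch of fetching a page with httplib directly; the host www.example.com, the header values, and the function name use_httplib are placeholders of mine, not part of the original demo.

#!/usr/bin/python
# -*- coding: utf-8 -*-
# Minimal httplib sketch (Python 2): fetch a page without urllib2.
import httplib

def use_httplib():
    # Placeholder host; replace with your own target
    conn = httplib.HTTPConnection("www.example.com", 80, timeout=5)
    # Plain GET for the root path, with a couple of headers
    conn.request("GET", "/", headers={"User-Agent": "Mozilla/5.0",
                                      "Accept": "text/html"})
    resp = conn.getresponse()
    print resp.status, resp.reason   # e.g. 200 OK
    print len(resp.read())
    conn.close()

if __name__ == "__main__":
    use_httplib()

Compared with urllib2, httplib works at the connection level: you open the connection, send the request, and read the response yourself, with no handlers for proxies, cookies, or redirects, which is why the higher-level urllib2 functions above are usually more convenient.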