This article introduces two ways to forge an HTTP request header with urllib2 in Python. When writing collection (web-scraping) scripts, it is often necessary to forge the request header so that the target server accepts the request.
Next, we use urllib2 to forge parts of the request header and collect page information.
Method 1:
#! /Usr/bin/python #-*-coding: UTF-8-*-# encoding = UTF-8 # Filename: urllib2-header.py import urllib2import sys # Capture Web content-send header-1url = "http://www.jb51.net" send_headers = {'host': 'www .jb51.net', 'user-Agent ': 'mozilla/5.0 (Windows NT 6.2; rv: 16.0) Gecko/20100101 Firefox/100', 'accept': 'Text/html, application/xhtml + xml, application/xml; q = 0.9, */*; q = 0.8 ', 'connection': 'Keep-alive'} req = urllib2.Request (url, headers = send_headers) r = urllib2.urlopen (req) html = r. read () # returned webpage content receive_header = r.info () # returned header information # sys. getfilesystemencoding () html = html. decode ('utf-8', 'replace '). encode (sys. getfilesystemencoding () # Transcoding: print receive_header # print '############################### ##### 'print html
Method 2:
#!/usr/bin/python# -*- coding: utf-8 -*-#encoding=utf-8#Filename:urllib2-header.py import urllib2import sys url = 'http://www.jb51.net' req = urllib2.Request(url)req.add_header('Referer','http://www.jb51.net/')req.add_header('User-Agent','Mozilla/5.0 (Windows NT 6.2; rv:16.0) Gecko/20100101 Firefox/16.0')r = urllib2.urlopen(req) html = r.read()receive_header = r.info() html = html.decode('utf-8').encode(sys.getfilesystemencoding()) print receive_headerprint '#####################################'print html