import requests
from lxml import etree
# Proxy IP Information Store
def write_proxy(proxies):
    """Append each verified proxy address to Ip_proxy.txt, one per line.

    Args:
        proxies: iterable of "ip:port" strings to persist.
    """
    print(proxies)
    # Open the file once (append mode) instead of re-opening per proxy.
    with open("Ip_proxy.txt", "a+") as f:
        for proxy in proxies:
            print("Writing:", proxy)
            f.write(proxy + "\n")
    print("Input complete!!!")
# parse the Web page and get the proxy IP in the Web page
def get_proxy(html):
    """Parse a proxy-list page and extract "ip:port" strings.

    The extracted proxies are passed straight to test_proxies() for
    availability checking.

    Args:
        html: raw HTML text of one listing page (xicidaili.com layout:
            rows alternate class="odd" and class="").
    """
    selector = etree.HTML(html)
    # print(selector.xpath("//title/text()"))
    proxies = []
    # Rows on the source site alternate between class="odd" and class="".
    for row in selector.xpath('//tr[@class="odd"] | //tr[@class=""]'):
        # td[2] holds the IP address, td[3] the port number.
        ip = row.xpath("./td[2]/text()")[0]
        port = row.xpath("./td[3]/text()")[0]
        proxies.append(ip + ":" + port)
    # Report how many proxies this page yielded.
    print(len(proxies))
    test_proxies(proxies)
# Verify each obtained proxy by requesting Baidu through it and checking the
# HTTP status code of the response.
def test_proxies(proxies):
    """Check each proxy by fetching Baidu through it; keep the working ones.

    A proxy is considered available when the request succeeds within the
    timeout and returns HTTP 200. Working proxies are written to
    Ip_proxy.txt via write_proxy().

    Args:
        proxies: iterable of "ip:port" strings to verify.
    """
    url = "http://www.baidu.com/"
    header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
    }
    normal_proxies = []
    # enumerate() replaces the hand-rolled counter from the original.
    for count, proxy in enumerate(proxies, start=1):
        print("%s:" % count)
        try:
            # Short timeout: a slow proxy is as useless as a dead one.
            response = requests.get(
                url, headers=header, proxies={"http": proxy}, timeout=1
            )
            if response.status_code == 200:
                print("Proxy IP available:", proxy)
                normal_proxies.append(proxy)
            else:
                print("The proxy IP is not available:", proxy)
        except Exception:
            # Connection errors / timeouts mean the proxy is dead; skip it.
            print("The proxy IP is invalid:", proxy)
    # print(normal_proxies)
    write_proxy(normal_proxies)
def get_html(url):
    """Download one proxy-list page and feed its body to get_proxy().

    Args:
        url: address of the listing page to fetch.
    """
    header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
    }
    response = requests.get(url, headers=header)
    # print(response.text)
    get_proxy(response.text)
if __name__ == "__main__":
    # Crawl the first three pages of the proxy listing.
    base_url = "http://www.xicidaili.com/nn/%s"
    for page in range(1, 4):
        get_html(base_url % page)
# Python 3 + requests: crawl proxy IPs and verify their availability.