Crawling proxy IPs with Python 3 and requests, and verifying their availability

Source: Internet
Author: User
Tags get ip xpath

import requests
from lxml import etree


# Proxy IP Information Store
# Proxy IP information store
def write_proxy(proxies):
    """Append each proxy (``ip:port`` string) in *proxies* to Ip_proxy.txt.

    The file is opened once in append mode rather than re-opened for every
    proxy (the original re-opened it inside the loop).
    """
    print(proxies)
    with open("Ip_proxy.txt", "a+") as f:
        for proxy in proxies:
            print("Writing:", proxy)
            f.write(proxy + "\n")
    print("Input complete!!!")


# parse the Web page and get the proxy IP in the Web page
# Parse the web page and get the proxy IPs in the web page
def get_proxy(html):
    """Extract ``ip:port`` proxy strings from the listing page *html*.

    Rows carrying class ``odd`` or an empty class attribute hold the proxy
    entries on xicidaili.com; td[2] is the IP and td[3] the port.
    After extraction the proxies are handed to ``test_proxies`` for
    availability checking.
    """
    selector = etree.HTML(html)
    proxies = []
    # NOTE(review): XPath reconstructed from the garbled original; it was
    # intended to match both row classes used by the site.
    for row in selector.xpath('//tr[@class="odd"] | //tr[@class=""]'):
        ip = row.xpath("./td[2]/text()")[0]
        port = row.xpath("./td[3]/text()")[0]
        proxies.append(ip + ":" + port)
    # Report how many proxies this page yielded.
    print(len(proxies))
    test_proxies(proxies)


# Verify that the availability of the IP has been obtained, this section of the code by visiting the Baidu website, the return of the response status code to determine (whether available).
# Verify the availability of the obtained IPs by visiting the Baidu website
# and checking the returned response status code.
def test_proxies(proxies):
    """Probe each proxy against Baidu; store the working ones via write_proxy.

    A proxy is considered available when an HTTP GET through it returns
    status 200 within the 1-second timeout. Connection failures and
    timeouts mark the proxy as invalid.
    """
    url = "http://www.baidu.com/"
    header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
    }
    normal_proxies = []
    for count, proxy in enumerate(proxies, start=1):
        print("%s:" % count)
        try:
            response = requests.get(
                url, headers=header, proxies={"http": proxy}, timeout=1
            )
            if response.status_code == 200:
                print("Proxy IP available:", proxy)
                normal_proxies.append(proxy)
            else:
                print("The proxy IP is not available:", proxy)
        # Narrowed from a blanket Exception: only network/HTTP errors
        # mean the proxy itself is bad.
        except requests.RequestException:
            print("The proxy IP is invalid:", proxy)
    write_proxy(normal_proxies)


def get_html(url):
    """Download the listing page at *url* and pass its HTML to get_proxy."""
    header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
    }
    response = requests.get(url, headers=header)
    get_proxy(response.text)


if __name__ == "__main__":
    # Crawl the first three listing pages of the proxy site.
    base_url = "http://www.xicidaili.com/nn/%s"
    for page in range(1, 4):
        get_html(base_url % page)

Python3 requests crawling proxy IP and verifying availability

Contact Us

The content on this page is sourced from the Internet and does not represent Alibaba Cloud's opinion; products and services mentioned on this page have no relationship with Alibaba Cloud. If the content of the page is confusing, please write us an email, and we will handle the problem within 5 days of receiving your email.

If you find any instances of plagiarism from the community, please send an email to: info-contact@alibabacloud.com and provide relevant evidence. A staff member will contact you within 5 working days.

A Free Trial That Lets You Build Big!

Start building with 50+ products and up to 12 months usage for Elastic Compute Service

  • Sales Support

    1 on 1 presale consultation

  • After-Sales Support

    24/7 Technical Support 6 Free Tickets per Quarter Faster Response

  • Alibaba Cloud offers highly flexible support services tailored to meet your exact needs.