# -*- coding: utf-8 -*-
"""
Created on Mon Mar 21 11:04:54 2017
@author: SL
"""
import requests
import time
#################################################################################
# First find the captcha link to crawl. For example, I want to crawl traffic
# violation information:
# found the vehicle-violation captcha link http://smart.gzeis.edu.cn:8081/content/authcode.aspx
# From the page source, found the corresponding login/query link
# https://www.stc.gov.cn/szwsjj_web/jsp/xxcx/jdcjtwfcx.jsp
# From the page source, found the corresponding captcha link
# https://www.stc.gov.cn:443/szwsjj_web/imgservlet.action?
#################################################################################
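# A hedged sketch, not part of the original script: how the captcha link could be pulled
# out of the query page's source automatically instead of by eye. It assumes the page
# embeds the captcha in an <img> tag whose src contains "ImgServlet"; the function name
# and the regex are illustrative assumptions, not confirmed details of the real page.
def find_captcha_url(page_url='https://www.stc.gov.cn/szwsjj_web/jsp/xxcx/jdcjtwfcx.jsp'):
    import re
    page = requests.get(page_url)
    # look for an <img> tag whose src points at the captcha servlet
    match = re.search(r'<img[^>]+src=["\']([^"\']*ImgServlet[^"\']*)["\']', page.text, re.I)
    return match.group(1) if match else None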
def downloads_pic(pic_name):
    # url = 'http://smart.gzeis.edu.cn:8081/Content/AuthCode.aspx'
    url = 'https://www.stc.gov.cn/szwsjj_web/ImgServlet.action?'
    # In the rare case that you want the raw socket response from the server, you can
    # access res.raw -- just make sure stream=True is set in the initial request.
    res = requests.get(url, stream=True)
    print(res)
    with open(r'G:\DownloadsVerificationCode\%s.jpg' % pic_name, 'wb') as f:
        print(res.iter_content(chunk_size=1024))
        # Response.iter_content handles much of what you would otherwise have to deal
        # with yourself through Response.raw; when streaming a download, this is the
        # preferred way to get the content.
        for chunk in res.iter_content(chunk_size=1024):
            print(chunk)
            if chunk:  # filter out keep-alive chunks
                f.write(chunk)
                # flush the buffer: write buffered data to the file immediately and empty
                # the buffer instead of waiting passively for the output buffer to fill
                f.flush()
    # the with statement closes the file automatically, so no explicit f.close() is needed
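# A hedged sketch, not part of the original post: captcha images are normally tied to a
# server-side session, so if a downloaded image is to be used for a real query, it should
# be fetched through a requests.Session whose cookies are reused for the follow-up
# request. The function name is an assumption; the actual form fields on jdcjtwfcx.jsp
# are not shown here.
def downloads_pic_with_session(pic_name):
    session = requests.Session()
    res = session.get('https://www.stc.gov.cn/szwsjj_web/ImgServlet.action?', stream=True)
    with open(r'G:\DownloadsVerificationCode\%s.jpg' % pic_name, 'wb') as f:
        for chunk in res.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
    # reuse this session (and its cookies) when submitting the query form later
    return session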
if __name__ == '__main__':
    for i in range(300):
        # time.time() returns the current timestamp (floating-point seconds since the
        # 1970 epoch); scaling to microseconds gives each image a unique file name
        pic_name = int(time.time() * 1000000)
        downloads_pic(pic_name)