Three methods for downloading images in batches using python

Source: Internet
Author: User

There are three methods: the first uses the win32com extension library provided by Microsoft to drive IE; the second uses selenium's webdriver; the third uses the HTMLParser parser included with Python. Win32com can obtain a document object similar to the one in JavaScript, but it appears to be read-only (I could not find documentation saying otherwise). Selenium supports Chrome, IE, FireFox, and other browsers; each browser driver has execute_script and find_element_by_xx methods, which make it easy to run JavaScript (including modifying elements) and to read elements from the HTML. At the time of writing, selenium only supported Python 2.6 and 2.7. HTMLParser requires writing a class that inherits from the base class and overrides the element-parsing methods. In my opinion, selenium is the most convenient to use and manipulates HTML elements most easily.
The Code is as follows:

Win32com:

The code is as follows:

# Slide the scroll bar toward the bottom, up to 40 * 500 = 20000 pixels.
# Simulate a right-arrow key press to step through multiple image pages.
import sys
import time
import os
import urllib.request

import win32com.client
import win32api


def main():
    """Drive Internet Explorer to a URL given on the command line and save
    every large image on each page under e:\\img\\<page title>.

    Pages are advanced by simulating the right-arrow key; the loop stops
    when the URL no longer changes after a key press.
    """
    # The target URL is the first command-line argument.
    url = sys.argv[1]
    # Launch and show Internet Explorer via COM automation.
    ie = win32com.client.Dispatch("InternetExplorer.Application")
    ie.Navigate(url)
    ie.Visible = True
    last_url = ''
    dir_name = ''
    while last_url != url:
        print('\nThe URL is:', url, '\n')
        # Wait for the browser, then for the document, to finish loading.
        while ie.ReadyState != 4:
            time.sleep(1)
        while ie.Document.readyState != "complete":
            time.sleep(1)
        # Scroll down in 500px steps so lazy-loaded images appear; stop
        # early once the vertical offset stops changing (page bottom).
        win = ie.Document.parentWindow
        last_y = -1
        for i in range(40):
            win.scrollTo(0, 500 * i)
            now_y = win.pageYOffset
            if now_y == last_y:
                break
            last_y = now_y
            time.sleep(0.4)
        print('document load state:', ie.Document.readyState)
        doc = ie.Document
        # Create the output directory the first time through the loop,
        # naming it after the page title ('|' is illegal in paths).
        if dir_name == '':
            root_dir = 'e:\\img'
            dir_name = (root_dir + '\\' + doc.title).replace('|', '-')
            if not os.path.exists(root_dir):
                os.mkdir(root_dir)
            if not os.path.exists(dir_name):
                os.mkdir(dir_name)
        all_image = doc.images
        print('Total', all_image.length, 'images')
        count = 0
        for img in all_image:
            # Only the main image element is wanted; 'b_img' is the id used
            # on the pages this script targets -- TODO confirm for other sites.
            if img.id == 'b_img':
                count += 1
                print(count, img.src)
                time.sleep(1)
                img_file = urllib.request.urlopen(img.src)
                byte = img_file.read()
                print(count, 'download complete!', '-' * 10,
                      'size:', '{:.3}'.format(len(byte) / 1024), 'KB')
                # Skip tiny responses (placeholders, icons).
                if len(byte) > 7000:
                    # Flatten the URL into a file name; '/' and ':' are
                    # illegal in Windows file names.
                    file_name = img.src.replace('/', '_').replace(':', '_')
                    # Trim any trailing '!...' or '?...' URL decoration;
                    # '?' takes precedence when both are present.
                    end = len(file_name)
                    if file_name.rfind('!') != -1:
                        end = file_name.rfind('!')
                    if file_name.rfind('?') != -1:
                        end = file_name.rfind('?')
                    file_name = file_name[:end]
                    with open(dir_name + '\\' + file_name, 'wb') as write_file:
                        write_file.write(byte)
                    print(count, file_name, 'complete!')
        # Advance to the next page: 39 is VK_RIGHT (right-arrow key).
        last_url = url
        win32api.keybd_event(39, 0)
        time.sleep(1)
        url = ie.Document.url
        print(last_url, url)
    # ie.Quit()


if __name__ == '__main__':
    main()

Selenium:

The code is as follows (the original file carried a `# -*- coding: cp936 -*-` declaration):
import sys
import os
import urllib.request

from selenium import webdriver


def main():
    """Open the URL given on the command line in Chrome via selenium and
    save every large <img> on the page under e:\\img\\<page title>.
    """
    # The target URL is the first command-line argument.
    url = sys.argv[1]
    # Drive Chrome through the WebDriver protocol.
    driver = webdriver.Chrome()
    driver.get(url)
    # Jump to the bottom so lazy-loaded images are fetched.
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    # Name the output directory after the page title ('|' is illegal in paths).
    dir_name = driver.find_element_by_tag_name('title').text
    print(dir_name)
    root_dir = 'e:\\img'
    dir_name = (root_dir + '\\' + dir_name).replace('|', '-')
    if not os.path.exists(root_dir):
        os.mkdir(root_dir)
    if not os.path.exists(dir_name):
        os.mkdir(dir_name)
    images = driver.find_elements_by_tag_name('img')
    count = 0
    for image in images:
        count += 1
        image_url = str(image.get_attribute('src'))
        img_file = urllib.request.urlopen(image_url)
        byte = img_file.read()
        print(count, 'download complete!', '-' * 10,
              'size:', len(byte) / 1024, 'KB')
        # Skip tiny responses (placeholders, icons).
        if len(byte) > 7000:
            # Flatten the URL into a Windows-safe file name, then trim any
            # trailing '!...' or '?...' decoration ('?' takes precedence).
            file_name = image_url.replace('/', '_').replace(':', '_')
            end = len(file_name)
            if file_name.rfind('!') != -1:
                end = file_name.rfind('!')
            if file_name.rfind('?') != -1:
                end = file_name.rfind('?')
            file_name = file_name[:end]
            with open(dir_name + '\\' + file_name, 'wb') as write_file:
                write_file.write(byte)
            print(count, file_name, 'complete!')

    driver.quit()


if __name__ == '__main__':
    main()

HTMLParser:

The code is as follows (sys, imported below, is a very standard module):
Import sys
Import urllib. request
# Gather our code in a main () function

From html. parser import HTMLParser
class MyHTMLParser(HTMLParser):
    """HTML parser that downloads every <img> it encounters.

    Each image whose response body exceeds MIN_SIZE bytes is written to
    e:\\img\\ under a flattened, filesystem-safe version of its URL.
    """

    # Minimum response size in bytes; smaller responses (icons, spacers,
    # tracking pixels) are skipped.
    MIN_SIZE = 1000

    @staticmethod
    def _make_file_name(src):
        """Flatten an image URL into a Windows-safe file name.

        '/' and ':' (illegal in Windows file names) become '_'; any
        trailing '!...' or '?...' URL decoration is trimmed, with '?'
        taking precedence when both are present.
        """
        name = src.replace('/', '_').replace(':', '_')
        end = len(name)
        if name.rfind('!') != -1:
            end = name.rfind('!')
        if name.rfind('?') != -1:
            end = name.rfind('?')
        return name[:end]

    def handle_starttag(self, tag, attrs):
        """Download the image referenced by an <img src=...> start tag."""
        if tag != 'img':
            return
        for attr, value in attrs:
            if attr != 'src':
                continue
            img_file = urllib.request.urlopen(value)
            byte = img_file.read()
            if len(byte) > self.MIN_SIZE:
                file_name = self._make_file_name(value)
                # NOTE(review): the output directory e:\img is assumed to
                # exist already -- the original code did not create it here.
                with open('e:\\img\\' + file_name, 'wb') as write_file:
                    write_file.write(byte)

def main():
    """Fetch the URL given on the command line, save the raw page to disk,
    and feed it to MyHTMLParser so every large image is downloaded.
    """
    # The target URL is the first command-line argument.
    url = sys.argv[1]
    print('\nThe URL is:', url, '\n')
    # Read the resource the URL points to.
    html_file = urllib.request.urlopen(url)
    byte_content = html_file.read()
    # Save the raw HTML page for later inspection.
    # NOTE(review): e:\img\html must already exist -- confirm, or create it.
    with open('e:\\img\\html\\result.htm', 'wb') as url_file:
        url_file.write(byte_content)
    # Decode bytes to str; the page is assumed to be UTF-8 encoded.
    s = str(byte_content, encoding="utf-8")
    # print(s)
    # HTMLParser's 'strict' parameter was removed in Python 3.5; the
    # default (lenient) parsing mode matches the old strict=False.
    parser = MyHTMLParser()
    parser.feed(s)


# Standard boilerplate to call the main() function to begin the program.
if __name__ == '__main__':
    main()

Contact Us

The content of this page is sourced from the Internet and does not represent Alibaba Cloud's opinion; the products and services mentioned on this page have no relationship with Alibaba Cloud. If the content of the page confuses you, please write us an email; we will handle the problem within 5 days of receiving your email.

If you find any instances of plagiarism from the community, please send an email to: info-contact@alibabacloud.com and provide relevant evidence. A staff member will contact you within 5 working days.

A Free Trial That Lets You Build Big!

Start building with 50+ products and up to 12 months usage for Elastic Compute Service

  • Sales Support

    1 on 1 presale consultation

  • After-Sales Support

    24/7 Technical Support 6 Free Tickets per Quarter Faster Response

  • Alibaba Cloud offers highly flexible support services tailored to meet your exact needs.