This article describes a Python method for downloading Baidu Music using multiple threads combined with a queue. It is shared here for your reference; the details are as follows:
I had always wanted to write a music-download script, and eventually decided to target Baidu Music. After a good deal of analysis I finally produced a script that downloads songs from Baidu Music. At present it only downloads the first page of search results by default; readers can extend it freely.
It works on both Windows and Linux and depends on the BeautifulSoup library, which is used primarily for parsing HTML.
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Bulk-download one singer's songs from Baidu Music.

Currently only the first page of search results is fetched; extend as needed.
Python 2 script: uses urllib2, Queue, unicode/xrange.

@author: admin
@qq: 1243385033
"""
import threading
import os
import re
import sys
import urllib2
from Queue import Queue

from bs4 import BeautifulSoup

# Target singer. NOTE(review): the original article used a Chinese name
# ("Yadong" is the scrape's transliteration) - substitute any singer here.
SINGER = u'Yadong'
# Local directory the MP3s are saved under (a per-singer subfolder is created).
SAVE_FOLDER = 'F:/music/'
# Search URL; %s is the URL-quoted singer name.
SEARCH_URL = "http://music.baidu.com/search/song?key=%s&s=1"
# Baidu music player "box" URL; the song title and singer are appended to it.
SONG_URL = "http://box.zhangmen.baidu.com/x?op=12&count=1&mtype=1&title="


class Downloader(threading.Thread):
    """Worker thread: pulls {song_name: url} dicts off a queue and saves them."""

    def __init__(self, task):
        threading.Thread.__init__(self)
        # task is a Queue of single-entry dicts {song_name: download_url}
        self.task = task

    def run(self):
        """Override of Thread.run: consume queue items forever (daemon thread)."""
        while True:
            url = self.task.get()
            self.download(url)
            self.task.task_done()

    def build_path(self, filename):
        """Return SAVE_FOLDER/SINGER/<filename>.mp3 for a given song name."""
        parent_path = os.path.join(SAVE_FOLDER, SINGER)
        return os.path.join(parent_path, filename + '.mp3')

    def download(self, url):
        """Download one song dict {name: url} to disk in 1 KiB chunks."""
        sub_url = url.items()
        f_name = sub_url[0][0]   # song name -> output file name
        req_url = sub_url[0][1]  # actual MP3 URL
        handle = urllib2.urlopen(req_url)
        save_path = self.build_path(f_name)
        with open(save_path, "wb") as handler:
            while True:
                # Fixed: the scraped text showed "1024x768"; the chunk size is 1024.
                chunk = handle.read(1024)
                if not chunk:
                    break
                handler.write(chunk)
        msg = u"has been downloaded from %s" % req_url
        sys.stdout.write(msg)
        sys.stdout.flush()


class HttpRequest:
    """Queries Baidu Music for SINGER and resolves each song's real MP3 URL."""

    def __init__(self):
        self.task = []
        # The box service returns XML whose URL halves are wrapped in CDATA.
        # NOTE(review): the scrape stripped the XML tag names from these
        # patterns; <decode>/<encode> is the well-known Baidu box format -
        # confirm against a live response.
        self.reg_decode = re.compile(r'<decode>.*?CDATA\[(.*?)\]\].*?</decode>')
        self.reg_encode = re.compile(r'<encode>.*?CDATA\[(.*?)\]\].*?</encode>')
        self.init()
        self.target_url = SEARCH_URL % urllib2.quote(self.encode2utf8(SINGER))

    def encode2utf8(self, source):
        """UTF-8-encode a str/unicode value; pass anything else through as-is."""
        if source and isinstance(source, (str, unicode)):
            return source.encode("utf8")
        return source

    def mkDir(self, dir_name):
        """Create dir_name if it does not already exist."""
        if not os.path.exists(dir_name):
            os.mkdir(dir_name)

    def init(self):
        """Ensure SAVE_FOLDER and its per-singer subfolder exist."""
        self.mkDir(SAVE_FOLDER)
        sub_path = os.path.join(SAVE_FOLDER, SINGER)
        self.mkDir(sub_path)

    def http_request(self):
        """Fetch the first result page and return [{song_name: mp3_url}, ...].

        Bug fix: the original declared ``global SONG_URL`` and appended the
        song name to the module-level constant on every iteration, so each
        request URL accumulated all previous song names. A per-song local
        URL is built instead.
        """
        response = urllib2.urlopen(self.target_url)
        content = response.read()
        response.close()
        # Parse the search-result page.
        html = BeautifulSoup(content, from_encoding="UTF8")
        # The song list lives in <div monkey="song-list">; each title is a
        # <span class="song-title"> containing an <a> with the song name.
        span_tag = html.find_all('div', {"monkey": "song-list"})[0] \
                       .find_all('span', class_='song-title')
        for a_tag in span_tag:
            song_name = unicode(a_tag.find_all("a")[0].get_text())
            # Build the box query for this one song (local, not the global).
            song_url = SONG_URL + urllib2.quote(self.encode2utf8(song_name))
            song_url += '$$' + urllib2.quote(self.encode2utf8(SINGER)) \
                + '$$$$&url=&listenreelect=0&.r=0.1696378872729838'
            xmlfile = urllib2.urlopen(song_url)
            xml_content = xmlfile.read()
            xmlfile.close()
            # The MP3 URL comes back split: directory part in <encode>,
            # file name in <decode>.
            url1 = re.findall(self.reg_encode, xml_content)
            url2 = re.findall(self.reg_decode, xml_content)
            if not url1 or not url2:
                continue  # song not resolvable; skip it
            url = url1[0][:url1[0].rindex('/') + 1] + url2[0]
            self.task.append({song_name: url})
        return self.task


def start_download(urls):
    """Spawn one daemon Downloader per song, enqueue all songs, and wait."""
    quene = Queue()
    size = len(urls)
    # One worker thread per song (first page only, so the count is small).
    for _ in xrange(size):
        t = Downloader(quene)
        t.setDaemon(True)  # don't block interpreter exit
        t.start()
    for url in urls:
        quene.put(url)
    quene.join()  # wait until every task_done() has been called


if __name__ == '__main__':
    http = HttpRequest()
    urls = http.http_request()
    start_download(urls)
Hopefully this article will help you with Python programming.