Crawling weather forecasts from an overseas website with Python
This article describes how to use Python to crawl weather forecasts from an overseas weather site, and shares the code for your reference. The details are as follows:
Crawl_weather.py is as follows:
#encoding=utf-8
import httplib
import urllib2
import time
import threading
from Queue import Queue
from time import sleep
import re
import copy

lang = "fr"
count = 0

class Location:
    # Location(False, "China", "Beijing", "zh")
    # Location(True, "", "Asia", "zh")
    def __init__(self, is_beyond_country, country_name, loc_name, lang):
        self.country_name = country_name
        self.loc_name = loc_name
        self.lang = lang
        self.is_beyond_country = is_beyond_country

prn_lock = threading.RLock()

def GetLocationURLs(url, recursive):
    global count
    if url.find("weather-forecast") != -1:
        count = count + 1
        if count % 500 == 0:
            prn_lock.acquire()
            print "count: %d" % (count)
            prn_lock.release()
        return [url]
    page = urllib2.urlopen(url).read()
    time.sleep(0.01)
    # ...
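The listing of GetLocationURLs is cut off at this point in the source. For orientation only, here is a minimal sketch of how the recursive branch might continue: it collects hrefs from the fetched page and descends into them, returning the forecast-page URLs it finds. The href regex, the urljoin resolution, and the visited set are my assumptions, not the original code:

#encoding=utf-8
# Hypothetical sketch -- not the original, truncated function.
import re
import time
import urllib2
import urlparse

def GetLocationURLsSketch(url, recursive, visited=None):
    if visited is None:
        visited = set()
    if url in visited:                      # avoid re-crawling a page
        return []
    visited.add(url)
    if url.find("weather-forecast") != -1:  # leaf: a forecast page
        return [url]
    page = urllib2.urlopen(url).read()
    time.sleep(0.01)                        # throttle requests slightly
    urls = []
    for href in re.findall(r'href="([^"]+)"', page):
        full = urlparse.urljoin(url, href)  # resolve relative links
        if recursive or full.find("weather-forecast") != -1:
            urls.extend(GetLocationURLsSketch(full, recursive, visited))
    return urls

A real crawler would also restrict the recursion to the site's own location-browsing pages so it does not wander off-site.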
FetchLocation.py is as follows:
#encoding=utf-8
import sys
import httplib
import urllib2
import time
from threading import Thread
import threading
from Queue import Queue
from time import sleep
import re
import copy
from xml.dom import minidom
import HTMLParser
import datetime

q = Queue()                                   # work queue of URL batches
locks = [threading.RLock() for i in range(2)]
ThreadNumber = 20
locations = {}                                # collected location names
conds = {}                                    # collected weather conditions

def FindCountryBreadCrumbs(page):
    # Cut out the <ul id="country-breadcrumbs"> ... </ul> block of the page.
    lines = page.splitlines()
    count = 0
    start = -1
    end = -1
    opened = False
    for line in lines:
        if line.find("<ul id=\"country-breadcrumbs\">") != -1:
            start = count
            opened = True
        if opened and line.find("</ul>") != -1:
            end = count
            opened = False
        count = count + 1
    return "\n".join(lines[start:(end + 1)])

def GetText(nodelist):
    # Concatenate the text children of a DOM node, unescaping HTML entities.
    rc = []
    for node in nodelist:
        if node.nodeType == node.TEXT_NODE:
            rc.append(HTMLParser.HTMLParser().unescape(node.data))
    return ''.join(rc)

def FindCondition(page):
    # Extract the weather-condition strings from <span class="cond"> tags.
    pat = "<span class=\"cond\">(.*?)</span>"
    cds = re.findall(pat, page)
    cds = [HTMLParser.HTMLParser().unescape(cd).encode("utf-8") for cd in cds]
    return cds

def ExtractInfo(url):
    # Fetch one forecast page and return its (location names, conditions).
    try:
        page = urllib2.urlopen(url).read()
    except Exception:
        return [], []   # callers unpack two values, so return an empty pair
    text = FindCountryBreadCrumbs(page)
    text = HTMLParser.HTMLParser().unescape(text)
    dom = minidom.parseString(text.encode("utf-8"))
    locs = []
    lis = dom.getElementsByTagName("li")
    for li in lis:
        adr_list = li.getElementsByTagName("a")
        if adr_list:
            locs.append(GetText(adr_list[0].childNodes).encode("utf-8"))
        strs = li.getElementsByTagName("strong")
        if strs:
            locs.append(GetText(strs[0].childNodes).encode("utf-8"))
    cds = FindCondition(page)
    return locs, cds

def AddMap(lst, m):
    # Insert the list items into the map, keeping each key only once.
    for x in lst:
        if m.get(x) is None:
            m[x] = 1

def working():
    # Worker thread: process URL batches from the queue, then merge the
    # per-batch results into the shared maps under a lock.
    while True:
        urls = q.get()
        m = {}
        m2 = {}
        for url in urls:
            locs, cds = ExtractInfo(url)
            AddMap(locs, m)
            AddMap(cds, m2)
        locks[1].acquire()
        AddMap(m.keys(), locations)
        AddMap(m2.keys(), conds)
        locks[1].release()
        q.task_done()

def main():
    if len(sys.argv) < 2:
        exit()
    loc_path = sys.argv[1]
    fp = open(loc_path, "r")
    urls = [line.strip() for line in fp]
    fp.close()
    # Split the URL list into one batch per worker thread.
    blocks = len(urls) / ThreadNumber + 1
    for start in range(0, len(urls), blocks):
        end = start + blocks
        if end > len(urls):
            end = len(urls)
        q.put(urls[start:end])
    for i in range(ThreadNumber):
        t = Thread(target=working)
        t.setDaemon(True)
        t.start()
    q.join()
    fp = open("location_name.fr", "w")
    fp.write("\n".join(locations.keys()))
    fp.close()
    fp = open("conditions.fr", "w")
    fp.write("\n".join(conds.keys()))
    fp.close()

if __name__ == '__main__':
    main()
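As a quick sanity check of the condition extraction, the following self-contained Python 2 snippet runs the same regex-plus-unescape pipeline that FindCondition uses; the HTML fragment is invented for illustration, not real output from the site:

#encoding=utf-8
# Exercise the FindCondition pipeline on an invented fragment.
import re
import HTMLParser

sample = ('<span class="cond">Ensoleill&eacute;</span>'
          '<span class="cond">Nuageux</span>')
cds = re.findall('<span class="cond">(.*?)</span>', sample)
cds = [HTMLParser.HTMLParser().unescape(cd).encode("utf-8") for cd in cds]
print cds   # ['Ensoleill\xc3\xa9', 'Nuageux']

The script itself takes the URL list produced by Crawl_weather.py as its only command-line argument (python FetchLocation.py <url-list-file>) and writes its results to location_name.fr and conditions.fr.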
I hope this article helps you with your Python programming.