基於新浪SAE、使用Python開發的微信公眾平台,實現以下功能:
輸入段子---回覆笑話
輸入開源+文章---發送訊息到開源中國
輸入快遞+訂單號---查詢快遞資訊
輸入天氣---查詢南京最近五天天氣狀況
輸入微博熱點---回覆微博當前熱門話題
輸入電影+名稱---回覆百度雲端硬碟中搜尋到的連結
具體實現代碼:
# -*- coding: utf-8 -*-import hashlibimport webimport lxmlimport timeimport osimport urllib2,jsonimport urllibimport reimport randomimport hashlibimport cookielibfrom urllib import urlencodefrom lxml import etree class WeixinInterface: def __init__(self): self.app_root = os.path.dirname(__file__) self.templates_root = os.path.join(self.app_root, 'templates') self.render = web.template.render(self.templates_root) def GET(self): #擷取輸入參數 data = web.input() signature=data.signature timestamp=data.timestamp nonce=data.nonce echostr=data.echostr #自己的token token="weixin9047" #這裡改寫你在公眾平台裡輸入的token #字典序排序 list=[token,timestamp,nonce] list.sort() sha1=hashlib.sha1() map(sha1.update,list) hashcode=sha1.hexdigest() #sha1密碼編譯演算法 #如果是來自的請求,則回複echostr if hashcode == signature: return echostr def POST(self): str_xml = web.data() #獲得post來的資料 xml = etree.fromstring(str_xml)#進行XML解析 content=xml.find("Content").text#獲得使用者所輸入的內容 msgType=xml.find("MsgType").text fromUser=xml.find("FromUserName").text toUser=xml.find("ToUserName").text if(content == u"天氣"): url = "http://m.ip138.com/21/nanjing/tianqi/" headers = { 'Connection': 'Keep-Alive', 'Accept': 'text/html, application/xhtml+xml, */*', 'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'} req = urllib2.Request(url, headers = headers) opener = urllib2.urlopen(req) html = opener.read() rex = r'(?<=img src="/image/s[0-9].gif" alt=").{1,6}(?=" />)' rexx = r'(?<=div class="temperature">).{5,15}(?=)' n = re.findall(rex,html) m = re.findall(rexx,html) str_wether = "" for (i,j) in zip(m,n): str_wether = str_wether + j + " " +i + "\n" return self.render.reply_text(fromUser,toUser,int(time.time()),"最近五天天氣:\n"+str_wether) elif(content[0:2] == u"電影"): keyword = urllib.quote(content[2:].encode("utf-8")) url = "http://www.wangpansou.cn/s.php?q="+keyword headers = { 'Connection': 'Keep-Alive', 'Accept': 'text/html, application/xhtml+xml, */*', 
'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'} req = urllib2.Request(url, headers = headers) opener = urllib2.urlopen(req) html = opener.read() rex = r'https?://pan.baidu.com.*\?uk=[0-9]{10}.*[\d+?]"' m = re.findall(rex,html) string = u"" for i in m: string = string + i + "\n" return self.render.reply_text(fromUser,toUser,int(time.time()),u"以下是電影連結:\n"+string) elif(u"段子" in content): url_8 = "http://www.qiushibaike.com/" url_24 = "http://www.qiushibaike.com/hot/" headers = { 'Connection': 'Keep-Alive', 'Accept': 'text/html, application/xhtml+xml, */*', 'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3', 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64; Trident/7.0; rv:11.0) like Gecko'} req_8 = urllib2.Request(url_8, headers = headers) req_24 = urllib2.Request(url_24,headers = headers) opener_8 = urllib2.urlopen(req_8) opener_24 = urllib2.urlopen(req_24) html_8 = opener_8.read() html_24 = opener_24.read() rex = r'(?<=div class="content">).*?(?=