Connecting to Redis from Python and inserting URLs
#!/usr/bin/env python
# -*- coding: utf8 -*-
import redis

'''
A one-off connection like the one below is opened and dropped on every use,
which wastes resources. The port defaults to 6379, so it can be omitted:

r = redis.Redis(host='127.0.0.1', port=6379, password='tianxuroot')
r.set('name', 'root')
print(r.get('name').decode('utf8'))
'''

'''
Connection pool: when the program creates the data-source instance, it opens
several database connections at once and keeps them in the pool. When the
program later needs database access, it does not open a new connection but
takes an idle one from the pool.
'''
pool = redis.ConnectionPool(host='127.0.0.1', password='helloworld')  # build a connection pool
r = redis.Redis(connection_pool=pool)
r.set('foo', 'bar')
print(r.get('foo').decode('utf8'))
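Both snippets call .decode('utf8') on every read because redis-py returns bytes by default. A minimal sketch of avoiding that, assuming a local password-less Redis: passing decode_responses=True to the pool makes the client return str directly (the crawler script below hints at the same option in its commented-out from_url line).

import redis

# Assumes a local Redis on the default port with no password.
# decode_responses=True makes redis-py return str instead of bytes,
# so the manual .decode('utf8') calls above become unnecessary.
pool = redis.ConnectionPool(host='127.0.0.1', port=6379, decode_responses=True)
r = redis.Redis(connection_pool=pool)
r.set('name', 'root')
print(r.get('name'))  # prints 'root' as a str, no decoding needed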
from bs4 import BeautifulSoup
import requests
from lxml import etree
import redis

pool = redis.ConnectionPool(host='127.0.0.1', port=6379)
r = redis.Redis(connection_pool=pool)
# Alternatively: r = redis.Redis.from_url("redis://127.0.0.1:6379", decode_responses=True)


def get_urls(url):
    result = requests.get(url)
    selector = etree.HTML(result.text)
    # Grab the article link from each post entry on the list page.
    links = selector.xpath('//*[@id="archive"]/div/div[2]/p[1]/a[1]/@href')
    for link in links:
        r.sadd("first_urlsss", link)  # a Redis set deduplicates repeated URLs
    next_url = extract_next_url(result.text)
    if next_url:
        get_urls(next_url)  # recurse into the next list page


def extract_next_url(html):
    """Return the href of the 'next page' link, or None on the last page."""
    soup = BeautifulSoup(html, "lxml")
    tags = soup.select('a[class="next page-numbers"]')
    if tags:
        return tags[0]["href"]
    return None


if __name__ == '__main__':
    url = "http://python.jobbole.com/all-posts/"
    get_urls(url)
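Once the crawl finishes, the collected links live in the Redis set first_urlsss. A minimal sketch of reading them back, assuming the same local Redis as above:

import redis

pool = redis.ConnectionPool(host='127.0.0.1', port=6379, decode_responses=True)
r = redis.Redis(connection_pool=pool)

# SCARD gives the set's size; SMEMBERS returns every member.
print(r.scard("first_urlsss"), "unique urls collected")
for link in r.smembers("first_urlsss"):
    print(link)

Because SADD silently ignores members that already exist, re-running the crawler against the same pages does not create duplicate entries.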