转载—Python抓取豆瓣电影
短信预约 -IT技能 免费直播动态提醒
#!/usr/bin/python
# -*-coding:utf-8-*-
# Python: 2.7
# Program: 爬取豆瓣电影
from bs4 import BeautifulSoup
import urllib2, json, random, sys
# Python 2 hack: setdefaultencoding() is deleted from sys at startup, so the
# module must be reload()-ed to get it back.  Switching the default codec from
# ASCII to UTF-8 makes implicit str<->unicode conversions (file writes of the
# scraped Chinese text, presumably) stop raising UnicodeDecodeError.
# NOTE(review): widely considered fragile; kept as-is because the rest of the
# script relies on it.
reload(sys)
sys.setdefaultencoding('utf-8')
def get_data(url):
    """Fetch ``url`` (a Douban search-API endpoint) and return the parsed
    ``'data'`` field of its JSON response.

    A random desktop User-Agent is sent with each request to reduce the
    chance of the crawler being blocked.

    Raises whatever ``urllib2`` raises on network failure, and ``KeyError``
    if the response JSON has no ``'data'`` key.
    """
    my_headers = [
        'Mozilla/5.0 (Windows NT 5.2) AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.742.122 Safari/534.30',
        'Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET4.0E; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C)',
        'Opera/9.80 (Windows NT 5.1; U; zh-cn) Presto/2.9.168 Version/11.50',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/533.21.1 (KHTML, like Gecko) Version/5.0.5 Safari/533.21.1',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.648; .NET CLR 3.5.21022; .NET4.0E; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C)']
    header = {"User-Agent": random.choice(my_headers)}
    req = urllib2.Request(url, headers=header)
    # Close the response explicitly: the original leaked the socket.
    response = urllib2.urlopen(req)
    try:
        html = response.read()
    finally:
        response.close()
    data = json.loads(html)['data']
    return data
def get_movieInfo(url):
    """Scrape one Douban movie detail page and return its fields as a dict.

    The returned dict has keys ``Name``, ``Year``, ``Rate``, ``Runtime``,
    ``Summary``, ``URL``, ``Directors``, ``Stars`` and ``Category``.
    Multi-valued fields are space-separated strings with a trailing space
    (the original output format, preserved on purpose).

    Raises ``AttributeError`` if an expected element is missing from the
    page (``find`` returns ``None``); the caller is expected to handle it.
    """
    my_headers = [
        'Mozilla/5.0 (Windows NT 5.2) AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.742.122 Safari/534.30',
        'Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Trident/4.0; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET4.0E; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C)',
        'Opera/9.80 (Windows NT 5.1; U; zh-cn) Presto/2.9.168 Version/11.50',
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/533.21.1 (KHTML, like Gecko) Version/5.0.5 Safari/533.21.1',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.648; .NET CLR 3.5.21022; .NET4.0E; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET4.0C)']
    header = {"User-Agent": random.choice(my_headers)}
    req = urllib2.Request(url, headers=header)
    # Close the response explicitly: the original leaked the socket.
    response = urllib2.urlopen(req)
    try:
        html = response.read()
    finally:
        response.close()
    soup = BeautifulSoup(html, 'html.parser')
    movie = {}
    movie['Name'] = soup.find('span', property="v:itemreviewed").text
    movie['Year'] = soup.find('span', class_='year').text
    movie['Rate'] = soup.find('strong', property="v:average").text
    movie['Runtime'] = soup.find('span', property="v:runtime").text
    movie['Summary'] = soup.find('span', property='v:summary').text
    movie['URL'] = url
    # join instead of repeated += ; each value keeps its trailing space so
    # the on-disk format matches the original implementation byte-for-byte.
    movie['Directors'] = ''.join(a.text + ' ' for a in soup.find_all('a', rel="v:directedBy"))
    movie['Stars'] = ''.join(a.text + ' ' for a in soup.find_all('a', rel="v:starring"))
    movie['Category'] = ''.join(s.text + ' ' for s in soup.find_all('span', property="v:genre"))
    return movie
def get_urls():
base_url = 'https://movie.douban.com/j/new_search_subjects?sort=R&range=1,10&tags=%E7%94%B5%E5%BD%B1&start='
urls=[]
nu = 0
while True:
print nu
url = base_url + str(nu)
data = get_data(url)
if len(data) == 0:
break
for i in data:
urls.append(i['url'])
nu += 20
return urls
if __name__ == '__main__':
urls = get_urls()
f = open('movieinfo.txt','w+')
for url in urls:
try:
movie = get_movieInfo(url)
movie_str = json.dumps(movie,ensure_ascii=False, encoding='UTF-8')
f.write(movie_str)
f.write('\n')
f.flush()
except:
print url
continue
f.close()
摘自: https://blog.51cto.com/wucl202000/1961206,感谢原作者,供学习参考
免责声明:
① 本站未注明“稿件来源”的信息均来自网络整理。其文字、图片和音视频稿件的所属权归原作者所有。本站收集整理出于非商业性的教育和科研之目的,并不意味着本站赞同其观点或证实其内容的真实性。仅作为临时的测试数据,供内部测试之用。本站并未授权任何人以任何方式主动获取本站任何信息。
② 本站未注明“稿件来源”的临时测试数据将在测试完成后最终做删除处理。有问题或投稿请发送至: 邮箱/279061341@qq.com QQ/279061341