# -*- coding: utf-8 -*-
2
3 import requests
4 from bs4 import BeautifulSoup
5 import io
6
# Target gallery page to scrape.
url = "https://www.mzitu.com/164871"
# Referer + User-Agent mimic a real browser; the site rejects requests
# that lack them (anti-hotlinking check).
headers = {
    "Referer": "https://www.mzitu.com/164871",
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36",
}
# BUG FIX: the first request previously sent no headers (only the later
# per-page requests did) and had no timeout, so it could hang forever.
html = requests.get(url, headers=headers, timeout=10)
soup = BeautifulSoup(html.content, 'lxml')
# NOTE(review): index 10 is a fragile positional pick of a <span> on the
# page (presumably the page count) — verify against the live page layout.
text = soup.find_all("span")[10].text
print(text)
# Gallery title; used below to locate the <img> tag via its alt attribute.
title = soup.find("h2", class_='main-title').text
# Fetch each page of the gallery and save its image to <page-number>.jpg.
# NOTE(review): the page count is hard-coded at 9; `text` above appears to
# hold the real count — consider parsing and using it instead.
for i in range(1, 10):
    # Each gallery page lives at <gallery-url>/<page-number>.
    href = url + '/' + str(i)
    html = requests.get(href, headers=headers, timeout=10)
    beautiful = BeautifulSoup(html.text, 'lxml')
    # The image tag carries the gallery title as its alt text.
    pic_url = beautiful.find('img', alt=title)
    print(pic_url)
    if pic_url is None:
        # Page missing or layout changed — skip instead of crashing.
        continue
    # BUG FIX: the original indexed the garbled attribute name
    # 'class="lazy" data-src', which can never exist on a tag. The image
    # URL is normally in 'src'; lazy-loading templates put it in
    # 'data-src' instead — try both (TODO: confirm against live markup).
    img_src = pic_url.get('src') or pic_url.get('data-src')
    if not img_src:
        continue
    html = requests.get(img_src, headers=headers, timeout=10)
    # `with` guarantees the file handle is closed even if the write fails.
    with open(str(i) + '.jpg', 'wb') as f:
        f.write(html.content)