image.png
主页面数据页面
image.png
image.png
1.获取 200 个主页面的网址
2.获取每个主页面上 20 个副页面(帖子)的网址
3.每个副页面的内容
4.保存起来
代码实现
from lxml import etree
import requests
# Crawl cnblogs.com: follow the "Next >" pager to collect every list-page URL,
# then scrape each post on each collected page and append
# title / body / separator to cn-blog.csv.
url = "https://www.cnblogs.com/"
# Browser User-Agent header. NOTE: the original referenced `head` here without
# ever defining it (NameError) — it was only defined in the second script below.
head = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36'
}
url01 = url[:-1]  # base without the trailing slash; pager hrefs start with "/"
url_List = [url]  # include page 1 so it is scraped too
# Step 1: follow the pager's last link while it reads "Next >", collecting URLs.
while True:
    r = requests.get(url, headers=head).text  # headers= must be a keyword arg; positionally it would be `params`
    tree = etree.HTML(r)  # renamed from `re`, which shadowed the stdlib re module
    content = tree.xpath("//div[@class='pager']/a[last()]/text()")[0]
    s_url = tree.xpath("//div[@class='pager']/a[last()]/@href")[0]
    if content == "Next >":
        url = url01 + s_url
        url_List.append(url)
    else:
        break
# Step 2: scrape every collected page.
# (The original only scraped the final page; url_List was built but never used.)
for page_url in url_List:
    r = requests.get(page_url, headers=head).content.decode("utf-8")
    # 解析 (parse the list page)
    html = etree.HTML(r)
    # 获取数据 (URL of every post on this list page)
    list_url = html.xpath('//h3/a/@href')  # list_url = html.xpath('//div[@class="post_item_body"]/h3/a/@href')
    for post_url in list_url:
        r01 = requests.get(post_url, headers=head).content.decode("utf-8")
        html01 = etree.HTML(r01)
        title = html01.xpath('//a[@id="cb_post_title_url"]/text()')
        content = html01.xpath("string(//div[@id='cnblogs_post_body'])")
        if not title:
            # Some post pages lack the expected title anchor; skip instead of
            # crashing with IndexError on title[0].
            continue
        with open("cn-blog.csv", "a+", encoding="utf-8") as file:
            file.write(title[0] + "\n")
            file.write(content + "\n")
            file.write("*" * 50 + "\n")
from lxml import etree
import requests
# Second version: walk 199 successive cnblogs list pages via the pager's last
# link, scraping every post on each page into cn-blog.csv.
url = "https://www.cnblogs.com/"
# Browser User-Agent so the site does not reject bare requests.
head = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36'
}
url01 = url[:-1]  # base without the trailing slash; pager hrefs start with "/"
for _ in range(199):  # the original reused `x` for both this and the inner loop
    r = requests.get(url, headers=head).text  # original omitted headers here despite defining `head`
    tree = etree.HTML(r)  # renamed from `re`, which shadowed the stdlib re module
    s_url = tree.xpath("//div[@class='pager']/a[last()]/@href")[0]
    url = url01 + s_url  # advance to the next list page for the next iteration
    # URL of every post on the current list page.
    list_url = tree.xpath('//h3/a/@href')
    for post_url in list_url:
        # headers= must be a keyword arg; passed positionally it is `params`,
        # so the User-Agent was never actually sent.
        r01 = requests.get(post_url, headers=head).content.decode("utf-8")
        html01 = etree.HTML(r01)
        title = html01.xpath('//a[@id="cb_post_title_url"]/text()')
        content = html01.xpath("string(//div[@id='cnblogs_post_body'])")
        if not title:
            # Pages lacking the expected title anchor would crash title[0];
            # skip them instead.
            continue
        # 保存内容 (save title, body, and a separator line)
        with open("cn-blog.csv", "a+", encoding="utf-8") as file:
            file.write(title[0] + "\n")
            file.write(content + "\n")
            file.write("*" * 50 + "\n")