Following a video tutorial: an image crawler for the Duitang picture site (duitang.com).
"""
1.URL
2.模拟浏览器请求资源
3.解析网页
4.保存数据到本地
"""
import requests #第三方库
import urllib.parse
import json
import jsonpath
url ='https://www.duitang.com/napi/blog/list/by_search/?kw={}&start={}'
label = '校花'
label = urllib.parse.quote(label)
#print(label)
num = 0
for index in range(0, 2400, 24):
u = url.format(label, index)
we_data = requests.get(u).text
# print(we_data)
html = json.loads(we_data)
photo = jsonpath.jsonpath(html, "$..path")
print(photo)
for i in photo:
a = requests.get(i)
with open(r'G:\Python\test\图片爬虫\photo\{}.jpg'.format(num), 'wb') as f:
f.write(a.content)
num += 1
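Step 2 of the checklist above says to request the resource the way a browser would, yet the code sends requests' default python-requests User-Agent, which many sites block. A minimal sketch of what adding browser-like headers could look like; the User-Agent string and the timeout value are my own illustrative choices, not from the original post:

import requests
import json

# Illustrative browser-like User-Agent; any recent desktop UA string works
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/120.0 Safari/537.36'
}

u = 'https://www.duitang.com/napi/blog/list/by_search/?kw=%E6%A0%A1%E8%8A%B1&start=0'
resp = requests.get(u, headers=headers, timeout=10)
resp.raise_for_status()       # raise on 4xx/5xx instead of parsing an error page
html = json.loads(resp.text)  # same parsing step as in the loop above

The same headers= and timeout= arguments drop straight into the requests.get calls inside the loop.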
What follows is my own scrape of an image site, combining the novel crawler and the image crawler above. The images are indecent, so the site URL is masked; the code is shared for learning purposes only.
import requests
from pyquery import PyQuery

photo = []   # image URLs collected from the current article
num = 3538   # running file counter, continued from an earlier run

def onepage(one_url, oneflag):
    """Scrape one article page; when oneflag is True, also follow its pagination links."""
    response = requests.get(url=one_url)
    if response.status_code != 200:
        return False
    # print(response.text)
    doc = PyQuery(response.text)
    title = doc('title').text()
    print(title)
    desc = doc('head > meta:nth-child(6)').attr('content')
    print(desc)
    imglist = doc('#main > article > div > p a')
    # print(imglist)
    for dd in imglist.items():
        photo.append(dd.attr('href'))
        # print(dd.attr('href'))
    if oneflag:
        with open(r'G:\Python\test\图片爬虫\photo3\说明.txt', encoding='utf-8', mode='a+') as f1:
            f1.write(title + '\n')
        # Recurse into the "page-links" pagination of multi-page articles
        page = doc('#main > article > div > div.page-links a')
        for i in page.items():
            with open(r'G:\Python\test\图片爬虫\photo3\说明.txt', encoding='utf-8', mode='a+') as f1:
                f1.write(i.attr('href') + '\n')
            print(i.attr('href'))
            onepage(i.attr('href'), False)
        with open(r'G:\Python\test\图片爬虫\photo3\说明.txt', encoding='utf-8', mode='a+') as f1:
            f1.write('\n\n\n')
    return True

for j in range(143, 1000):
    url = 'https://******.com/blog/archives/' + str(j)
    photo = []
    if not onepage(url, True):
        continue
    print(j)
    print(photo)
    for i in photo:
        a = requests.get(i)
        with open(r'G:\Python\test\图片爬虫\photo3\{}-{}.jpg'.format(j, num), 'wb') as f2:
            f2.write(a.content)
        num += 1
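One weakness of the download loop above: a single failed request raises and kills the whole 800-odd-page run, and multi-page articles can push the same image URL into photo more than once. A hedged sketch of a replacement for the inner loop (the helper name download_all and the seen set are mine, not from the original):

import os
import requests

def download_all(urls, dest_dir, start_num):
    """Download each unique URL into dest_dir, skipping failures; return the next counter value."""
    seen = set()
    num = start_num
    for u in urls:
        if u in seen:                # pagination can yield the same image twice
            continue
        seen.add(u)
        try:
            a = requests.get(u, timeout=10)
            a.raise_for_status()
        except requests.RequestException as e:
            print('skip', u, e)      # one bad link should not abort the whole run
            continue
        with open(os.path.join(dest_dir, '{}.jpg'.format(num)), 'wb') as f:
            f.write(a.content)
        num += 1
    return num

Inside the for j loop this would replace the for i in photo block with num = download_all(photo, r'G:\Python\test\图片爬虫\photo3', num).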