最新if奖,2021if奖数据出炉,这里本渣渣继续用python对相关数据进行爬取采集,由于是官方网站,展示用,所以几乎没有任何反爬,当然毕竟是国外网站,会存在访问超时的错误,综合而言,这无疑是一个比较不错的练手网站,推荐老哥们上手试试,学着玩!
对于数据的抓取,一个简单的爬虫的构建一方面需要保证爬虫能够顺利运行,另一方面需要保证数据的抓取正确以及完整,这就需要构建日志(记录)信息文件以及对于运行报错的处理了,这方面需要不断的进行尝试和学习,最好是实例上手去完善。
目标网站:
https://ifworlddesignguide.com/winners/if-design-award-2021-entries/product-design-awards-2021?
产品设计类获奖总共有2026个数据!
通过抓包可以获取到数据来源接口及参数
接口:
https://ifworlddesignguide.com/api/v2/articles/collections/394?cursor=30&lang=en&count=30&orderby=date&filter=%7B%22filters%22:[]%7D
参数:
cursor: 30
lang: en
count: 30
orderby: date
filter: {"filters":[]}
通过多次加载页面(下拉数据)可以获悉cursor: 30为翻页字段数据,首页为0,第二页为30,第三页为60,以此类推可得翻页数据为30*翻页页码!
附上源码参考:
#20210417 iF奖数据获取
#微信:huguo00289
# -*- coding: utf-8 -*-
import requests
from fake_useragent import UserAgent
import time
import xlsxwriter
import datetime
from requests.adapters import HTTPAdapter
s = requests.Session()
# Mount retry-capable adapters so every request is retried up to 3 times —
# the article notes the overseas host often times out.
s.mount('http://', HTTPAdapter(max_retries=3))
s.mount('https://', HTTPAdapter(max_retries=3))
# Save the scraped records into an Excel workbook.
def save_spexcel(file_name, data_list):
    """Write data_list (one tuple per row) to <file_name>.xlsx, header row first."""
    workbook = xlsxwriter.Workbook(f'{file_name}.xlsx')
    sheet = workbook.add_worksheet(file_name)
    header = ['id', '奖项', '奖项类别', '标题', '描述', '链接', '媒体']
    sheet.write_row('A1', header)
    # Data starts on spreadsheet row 2, i.e. zero-based row index 1.
    for row_idx, record in enumerate(data_list, start=1):
        sheet.write_row(row_idx, 0, record)
    workbook.close()
    print(f"保存数据为{file_name}.excel表格成功!")
# Append one record to a UTF-8 text log.
def save_txt(file, data):
    """Append str(data) plus a trailing newline to the file <file>.txt."""
    with open(f'{file}.txt', 'a+', encoding='utf-8') as out:
        print(data, file=out)
# Fetch one page of award entries from the JSON API.
def get_data(page):
    """Fetch page *page* (30 entries per page) of the iF award collection.

    Returns a list of (id, type, award, headline, description, href, media)
    tuples; an empty list when the page carries no data.  (The original
    returned None on an empty page, which made main()'s extend() raise and
    spuriously log the page as failed.)
    Raises requests.HTTPError on a non-2xx response.
    """
    headers = {
        'User-Agent': UserAgent().random,
    }
    cursor = 30 * page  # paging offset: 0, 30, 60, ...
    print(cursor)
    save_txt('if_log', f'{datetime.datetime.now()}:cursor:{cursor}')
    url = "https://ifworlddesignguide.com/api/v2/articles/collections/394?"
    params = {
        'cursor': cursor,  # page offset
        'lang': 'en',
        'count': '30',
        'orderby': 'date',
        'filter': '{"filters":[]}',
    }
    response = s.get(url=url, params=params, headers=headers, timeout=10)
    response.raise_for_status()  # fail fast instead of json-parsing an error body
    time.sleep(8)  # polite delay between requests
    datas_json = response.json()['data']
    if not datas_json:
        return []
    print("存在数据!")
    save_txt('if_log', f'{datetime.datetime.now()}:存在数据!')
    data_list = []
    for data_json in datas_json:
        # Avoid shadowing the builtins `id` and `type` the original used.
        record = (
            data_json['id'],             # entry id
            data_json['type'],           # award
            data_json['award']['name'],  # award category
            data_json['headline'],       # title
            data_json['description'],    # description
            data_json['href'],           # detail-page link
            str(data_json['media']),     # media, stringified for Excel
        )
        print(record)
        save_txt('if_data', record)
        data_list.append(record)
    print(len(data_list))
    save_txt('if_log', f'{datetime.datetime.now()}:爬取第{page + 1}页数据成功,获取{len(data_list)}个数据!')
    return data_list
def main():
    """Crawl all 71 result pages and dump everything into if.xlsx."""
    all_rows = []
    for page in range(71):
        try:
            print(f">>正在爬取第{page+1}页数据..")
            save_txt('if_log', f'{datetime.datetime.now()}:正在爬取第{page+1}页数据..')
            all_rows.extend(get_data(page))
        except Exception as e:
            # Log the failed page number so it can be retried later.
            print(f"爬取失败,错误代码为:{e}")
            save_txt('if_fail', f'{page}')
            save_txt('if_log', f'{datetime.datetime.now()}:爬取第{page + 1}页数据失败,错误代码为:{e}!')
    save_spexcel('if', all_rows)
if __name__ == "__main__":
    main()
通过前面的源码我们已经获取到相关数据,并保存为excel,这里我们通过读取excel里的详情页面链接数据来直接提取详情页面相关我们想要的数据内容。
附上源码参考:
#20210417 iF奖详情获取
#微信:huguo00289
# -*- coding: utf-8 -*-
import requests
from lxml import etree
from fake_useragent import UserAgent
import time,os,re
import datetime
from requests.adapters import HTTPAdapter
import threading
import xlrd
s = requests.Session()
# Mount retry-capable adapters so every request is retried up to 3 times —
# the overseas host often times out.
s.mount('http://', HTTPAdapter(max_retries=3))
s.mount('https://', HTTPAdapter(max_retries=3))
def now():
    """Return the current local datetime (used to timestamp log lines)."""
    return datetime.datetime.now()
def gdetail(url):
    """Scrape one winner detail page.

    Saves the text fields to <category>/<title>/<title>.txt and downloads
    every product image into the same directory via thdownimgs().
    """
    headers = {
        'User-Agent': UserAgent().random,
    }
    save_txt('detail_log', f'{now()}:正在爬取详情页:{url}')
    response = s.get(url=url, headers=headers, timeout=10)  # fixed typo: was `responese`
    time.sleep(2)  # polite delay
    tree = etree.HTML(response.content.decode('utf-8'))
    # Title: concatenate the stripped text fragments of the h1
    # (join instead of the original quadratic `+=` loop).
    h1s = tree.xpath('//h1[@class="column large-9 page-description"]//text()')
    h1 = ''.join(h.strip() for h in h1s)
    print(h1)
    # Award-category headline.
    h2 = tree.xpath('//h2[@class="headline-2 award-box-headline"]/text()')[0]
    print(h2)
    # Build the output directory <category>/<title>; characters that are
    # illegal in file names are replaced with underscores.
    pattern = r"[\/\\\:\*\?\"\<\>\|]"
    category = re.sub(pattern, "_", h2.split(':')[-1].strip())
    title = re.sub(pattern, "_", h1)
    path = f'{category}/{title}'
    os.makedirs(path, exist_ok=True)
    # Description paragraph.
    p = tree.xpath('//p[@class="copy-5"]/text()')[0].strip()
    print(p)
    # Spec parameters: the first four <li> items each hold a label/value span
    # pair.  The original pasted this lookup four times; loop instead.
    parts = []
    for i in range(1, 5):
        spans = tree.xpath(f'//div[@class="profile-text-box-wrapper"]/ul/li[{i}]/span/text()')
        parts.append(f'{spans[0]}:{spans[1]}')
    uls = '\n'.join(parts)
    print(uls)
    # Designer / design company block, skipping blanks and the
    # "Go to profile" link text.
    des = ""
    for frag in tree.xpath('//div[@class="row align-right"]//text()'):
        frag = frag.strip()
        if frag != '\n' and frag != "" and frag != "Go to profile":
            des = f'{des}\n{frag}'
    print(des)
    # Persist the text content.
    file = f'{path}/{title}'
    data = f'标题:{h1}\n奖项类别:{h2}\n描述:\n{p}\n参数:\n{uls}\n设计师/设计公司:{des}\n'
    save_txt(file, data)
    # Collect the lazy-loaded image URLs and download them in parallel.
    imgs = tree.xpath('//div[@class="product-detail-page-images"]//img/@data-src')
    print(len(imgs))
    save_txt(file, f'\n\n\n图片:\n{imgs}')
    print(imgs)
    thdownimgs(imgs, path)
    save_txt('detail_log', f'{now()}:爬取详情页成功!')
# Append one record to a UTF-8 text log.
def save_txt(file, data):
    """Append str(data) plus a trailing newline to the file <file>.txt."""
    with open(f'{file}.txt', 'a+', encoding='utf-8') as out:
        print(data, file=out)
# Download all images for one product, one thread per image.
def thdownimgs(imgs, path):
    """Spawn one downimg() thread per URL in imgs, then wait for them all."""
    print(f'>> 正在开启{len(imgs)}个线程下载{len(imgs)}个图片..')
    save_txt('detail_log', f'{now()}:>> 正在开启{len(imgs)}个线程下载{len(imgs)}个图片..')
    workers = [threading.Thread(target=downimg, args=(img, path)) for img in imgs]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print("多线程下载图片完成!")
    save_txt('detail_log', f'{now()}:多线程下载图片完成!')
# Download a single image file.
def downimg(img_url, path):
    """GET img_url and write the bytes to <path>/<basename>.

    Failures are logged and the URL is recorded in detail_img_fail.txt for
    later retry; exceptions never propagate (this runs inside worker threads).
    """
    try:
        img_name = img_url.rsplit('/', 1)[-1]
        resp = s.get(url=img_url,
                     headers={'User-Agent': UserAgent().random},
                     timeout=8)
        with open(f'{path}/{img_name}', 'wb') as out:
            out.write(resp.content)
        print(f'>> 下载图片 {img_name} 成功!')
        save_txt('detail_log', f'{now()}:>> 下载图片 {img_name} 成功!')
    except Exception as e:
        print(f'下载图片{img_url}出错,错误代码:{e}')
        save_txt('detail_log', f'{now()}:下载图片{img_url}出错,错误代码:{e}')
        save_txt('detail_img_fail', f'{img_url}@{path}')
# Read the detail-page links back out of the Excel file.
def get_urls_excel():
    """Return every URL from column index 5 of if.xlsx, skipping the header.

    NOTE(review): xlrd >= 2.0 dropped .xlsx support; this needs xlrd 1.x
    (or a switch to openpyxl) — confirm the installed version.
    """
    rbook = xlrd.open_workbook("if.xlsx")
    # The original also called rbook.sheets() and discarded the result; removed.
    rsheet = rbook.sheet_by_index(0)  # first sheet
    urls = []
    for row in rsheet.get_rows():
        url = row[5].value  # column 5 holds the detail-page link
        if url != "链接":  # skip the header cell
            urls.append(url)
    print(f'获取链接成功,共获取{len(urls)}个链接!')
    save_txt('detail_log', f'{now()}:获取链接成功,共获取{len(urls)}个链接!')
    return urls
def main():
    """Visit every detail URL stored in if.xlsx, logging any page that fails."""
    for url in get_urls_excel():
        try:
            gdetail(url)
        except Exception as e:
            # Record the failing URL so it can be retried later.
            print(f'获取详情页{url}内容出错,错误代码:{e}')
            save_txt('detail_log', f'{now()}:获取详情页{url}内容出错,错误代码:{e}')
            save_txt('detail_url_fail', f'{url}')
if __name__ == '__main__':
    main()
感兴趣可以慢慢看,有疑问可以公众号找到本渣渣微信,私聊我!
感谢点击观看!
剧终!
·················END·················
你好,我是二大爷,
革命老区外出进城务工人员,
互联网非早期非专业站长,
喜好python,写作,阅读,英语
不入流程序,自媒体,seo . . .
公众号不挣钱,交个网友。
读者交流群已建立,找到我备注 “交流”,即可获得加入我们~
听说点 “在看” 的都变得更好看呐~
关注关注二大爷呗~给你分享python,写作,阅读的内容噢~
扫一扫下方二维码即可关注我噢~
本文分享自 Python与SEO学习 微信公众号,前往查看
如有侵权,请联系 cloudcommunity@tencent.com 删除。
本文参与 腾讯云自媒体同步曝光计划 ,欢迎热爱写作的你一起参与!