前往小程序,Get更优阅读体验!
立即前往
首页
学习
活动
专区
工具
TVP
发布
社区首页 >专栏 >Python-爬虫小计

Python-爬虫小计

作者头像
py3study
发布2020-01-17 11:29:11
3360
发布2020-01-17 11:29:11
举报
文章被收录于专栏:python3
代码语言:python
复制
# -*-coding:utf8-*-
import requests
from bs4 import BeautifulSoup
import time
import os
import urllib
import re
import json


# Suppress the InsecureRequestWarning spam triggered by verify=False below.
requests.packages.urllib3.disable_warnings()

# Browser-like User-Agent so the target site serves normal pages to the scraper.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
}
# Proxy endpoints used for every request (values redacted in the published article).
proxies = {"http": "**********************",
           "https": "********************8"}
def get_bs(url):
    """Fetch *url* through the module proxies and return a parsed soup.

    SSL verification is intentionally disabled (warnings are silenced at
    module level); the response body is parsed with the lxml backend.
    """
    response = requests.get(url, proxies=proxies, headers=headers, verify=False)
    return BeautifulSoup(response.content, 'lxml')

def get_first_url(pages=1):
    """Collect model detail-page URLs from the site's listing pages.

    Args:
        pages: how many listing pages to scan; defaults to 1, matching
            the original hard-coded behaviour.

    Returns:
        list[str]: absolute URLs of every ``dt a`` link whose href
        contains ``php``.
    """
    first_url_list = []
    for page_no in range(1, pages + 1):
        root_url = "https://www.model61.com/mold.php?page={}".format(page_no)
        bs = get_bs(root_url)
        # Use a distinct loop variable: the original reused ``i`` for both
        # the page counter and the anchor, shadowing the outer index.
        for anchor in bs.select("dt a"):
            src = anchor.get('href')
            # ``get('href')`` returns None for anchors without the
            # attribute; guard so the ``in`` test cannot raise TypeError.
            if src and "php" in src:
                first_url_list.append("https://www.model61.com/{}".format(src))
    return first_url_list

def get_second_url(first_url):
    """Scrape one detail page into a dict.

    Returns a dict with ``second_url`` (absolute link to the album page,
    taken from ``.cont-top a`` hrefs containing ``album_s``) and
    ``identity`` (the text of the ``.content_center_date`` element).
    """
    data = {}
    soup = get_bs(first_url)
    for anchor in soup.select(".cont-top a"):
        href = anchor.get('href')
        if "album_s" in href:
            data["second_url"] = "https://www.model61.com/{}".format(href)
    for date_block in soup.select(".content_center_date"):
        data["identity"] = date_block.get_text()
    return data


def get_thred_url(second_url):
    """Return the first ``album_list`` link on the album page.

    Falls through (implicitly returning None) when no matching anchor
    is found, exactly like the original.
    """
    soup = get_bs(second_url)
    for anchor in soup.select("dt a"):
        href = anchor.get('href')
        if "album_list" in href:
            return "https://www.model61.com/{}".format(href)


def get_image_list(thred_url):
    """Return absolute URLs for every image link on the gallery page.

    Both the left and right gallery columns are scanned, preserving the
    left-before-right order of the original.
    """
    soup = get_bs(thred_url)
    anchors = soup.select(".album_list_left a") + soup.select(".album_list_right a")
    return ["https://www.model61.com/{}".format(a.get('href')) for a in anchors]

def download_image(image_path, image_url):
    """Download *image_url* to *image_path* (best-effort; errors are printed).

    Redirects are not followed (``allow_redirects=False``), so a 3xx — or
    any other non-200 — response carries no image data.
    """
    try:
        r = requests.get(image_url, proxies=proxies, headers=headers,
                         verify=False, allow_redirects=False)
        # The original wrote r.content unconditionally, so redirect/error
        # responses were saved as corrupt .jpg files; only persist a
        # successful body.
        if r.status_code == 200:
            with open(image_path, 'wb') as f:
                f.write(r.content)
        else:
            print(image_url, "skipped, HTTP", r.status_code)
    except Exception as e:
        # Best-effort scraper: log and keep going, as the original did.
        print(e)

def create_face_id(data):
    """Create a directory for one identity and download all its images.

    Expects ``data['identity']`` (the raw text block scraped by
    get_second_url) and ``data['image_list']`` (absolute image URLs).
    Raises IndexError if the identity text has fewer lines than the
    fixed layout below expects (caught by the caller's try/except).
    """
    save_path = r""  # download root; empty string means the current directory
    ld_list = data["identity"].split("\n")
    # NOTE(review): these slice offsets assume a fixed line layout in the
    # scraped identity text — confirm against a live page before changing.
    identity = ld_list[1] + '_' + ld_list[3][4:] + "_" + ld_list[7][6:] + '_' + ld_list[8][4:]
    print(identity)
    identity_path = os.path.join(save_path, identity)
    # makedirs(exist_ok=True) creates missing parents and avoids the
    # check-then-create race of the original exists()/mkdir() pair.
    os.makedirs(identity_path, exist_ok=True)
    for index, image_url in enumerate(data['image_list']):
        # The original named files by millisecond timestamp alone, which
        # collides (silently overwriting files) when two downloads finish
        # within the same millisecond; suffix a per-image index instead.
        image_path = os.path.join(
            identity_path, '{}_{}.jpg'.format(int(time.time() * 1000), index))
        download_image(image_path, image_url)


if __name__ == '__main__':
    # Walk every listing entry; any failure on a single entry is logged
    # and the crawl continues with the next URL.
    for first_url in get_first_url():
        try:
            data = get_second_url(first_url)
            print(data)
            thred_url = get_thred_url(data['second_url'])
            data["image_list"] = get_image_list(thred_url)
            create_face_id(data)
        except Exception as e:
            print(first_url, e)
本文参与 腾讯云自媒体分享计划,分享自作者个人站点/博客。
原始发表:2019-05-06 ,如有侵权请联系 cloudcommunity@tencent.com 删除

本文分享自 作者个人站点/博客 前往查看

如有侵权,请联系 cloudcommunity@tencent.com 删除。

本文参与 腾讯云自媒体分享计划  ,欢迎热爱写作的你一起参与!

评论
登录后参与评论
0 条评论
热度
最新
推荐阅读
领券
问题归档专栏文章快讯文章归档关键词归档开发者手册归档开发者手册 Section 归档