
python scrapy simulated login (manually entering the captcha)

Author: 用户5760343 · Published 2022-01-10 (originally posted 2019-05-14 on the author's personal blog) · Column: sktj

scrapy startproject yelloweb

vi items.py

import scrapy


class YellowebItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    title = scrapy.Field()  # video title
    link = scrapy.Field()   # video page link
    img = scrapy.Field()    # cover image link

vi spiders/yellowebSpider.py

import urllib.request
from urllib.parse import urljoin

import scrapy
from scrapy import Request, FormRequest, Selector

from yelloweb.items import YellowebItem


class yellowebSpider(scrapy.Spider):
    name = "webdata"                       # the spider's identifier; it must be unique
    allowed_domains = ["91.91p17.space"]

    def start_requests(self):
        return [Request("http://91.91p17.space/login.php",
                        callback=self.login,
                        meta={"cookiejar": 1})]
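Before continuing with the rest of the spider class, a quick note: the meta={"cookiejar": 1} key above relies on Scrapy's built-in CookiesMiddleware, which keeps a separate cookie session per cookiejar value, and every later request in this walkthrough passes the same key along so the login cookie survives. If the login does not seem to stick, the following optional settings.py lines (my own suggestion, not from the original post) make the cookie traffic visible in the log:

COOKIES_ENABLED = True   # default value; the "cookiejar" meta key needs CookiesMiddleware active
COOKIES_DEBUG = True     # log Cookie / Set-Cookie headers while debugging the login flow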

    headers = {
        # only header fields belong in this dict; the original request line
        # "GET /index.php HTTP/1.1" was dropped because it is not a header
        "Host": "91.91p17.space",
        "Connection": "keep-alive",
        "Cache-Control": "max-age=0",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Referer": "http://91.91p17.space/login.php",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "zh-CN,zh;q=0.8"
    }
    def login(self, response):
        print("Starting the simulated login!")
        captcha_image = response.xpath('//*[@id="safecode"]/@src').extract()
        data = {}
        if len(captcha_image) > 0:
            print(urljoin("http://91.91p17.space", captcha_image[0]))
            # choose a filename and local save path (raw string so the backslashes survive)
            localpath = r"D:\SoftWare\Soft\WorkSpace\Python\scrapy\code\captcha.png"

            opener = urllib.request.build_opener()
            opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]
            urllib.request.install_opener(opener)
            urllib.request.urlretrieve(urljoin("http://91.91p17.space", captcha_image[0]), localpath)

            print("This login needs a captcha; open the saved captcha image locally and type it in:")
            captcha_value = input()
            data = {
                "username": "your username here",
                "password": "your password here",
                "fingerprint": "1838373130",
                "fingerprint2": "1a694ef42547498d2142328d89e38c22",
                "captcha_input": captcha_value,
                "action_login": "Log In",
                "x": "54",
                "y": "21"
            }
        else:
            print("No captcha on the login page -- the code is wrong again!")
        # print(data)
        print("Captcha entered; submitting the login form...")
        return [FormRequest.from_response(response,
                                          # carry the cookie session along
                                          meta={'cookiejar': response.meta['cookiejar']},
                                          # set headers to mimic a browser
                                          headers=self.headers,
                                          formdata=data,
                                          callback=self.next
                                          )]
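One caveat with the approach above: the captcha image is downloaded with urllib.request, which runs outside Scrapy's cookie session, so on sites that bind the captcha to the session cookie the typed-in code may be rejected. A minimal alternative sketch (my own variation, not from the original post; handle_captcha is a hypothetical method name) fetches the image through Scrapy itself so it shares the same cookiejar, and would slot into the spider class in place of login() above:

    def login(self, response):
        # request the captcha image through Scrapy so it rides on the same cookie session
        captcha_src = response.xpath('//*[@id="safecode"]/@src').extract_first()
        yield Request(urljoin("http://91.91p17.space", captcha_src),
                      meta={"cookiejar": response.meta["cookiejar"],
                            "login_response": response},   # keep the login page for from_response
                      callback=self.handle_captcha)

    def handle_captcha(self, response):
        # response.body holds the raw image bytes fetched within the same session
        with open("captcha.png", "wb") as f:
            f.write(response.body)
        captcha_value = input("Enter the captcha shown in captcha.png: ")
        login_response = response.meta["login_response"]
        yield FormRequest.from_response(login_response,
                                        meta={"cookiejar": response.meta["cookiejar"]},
                                        headers=self.headers,
                                        formdata={"username": "your username here",
                                                  "password": "your password here",
                                                  "captcha_input": captcha_value,
                                                  "action_login": "Log In"},
                                        callback=self.next)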

    def next(self, response):
        href = response.xpath('//*[@id="tab-featured"]/div/a/@href').extract()
        url = urljoin("http://91.91p17.space", href[0])
        # print("\n\n\n\n\n\n" + url + "\n\n\n\n\n\n")
        yield scrapy.http.Request(url,
                                  meta={'cookiejar': response.meta['cookiejar']},
                                  # set headers to mimic a browser
                                  headers=response.headers,
                                  callback=self.parse)

    def parse(self, response):
        sel = Selector(response)
        print("Reached the video listing page")

        web_list = sel.css('.listchannel')
        for web in web_list:
            item = YellowebItem()
            try:
                item['link'] = web.xpath('a/@href').extract()[0]
                url = response.urljoin(item['link'])
                yield scrapy.Request(url,
                                     meta={'cookiejar': response.meta['cookiejar']},
                                     callback=self.parse_content,
                                     dont_filter=True)
            except IndexError:
                print("Failed to pull the detail link for this entry...")

        # move on to the next listing page (outside the loop, so it is requested only once)
        href = response.xpath('//*[@id="paging"]/div/form/a[6]/@href').extract()
        if href:
            nextPage = urljoin("http://91.91p17.space/video.php", href[0])
            print(nextPage)
            yield scrapy.http.Request(nextPage,
                                      meta={'cookiejar': response.meta['cookiejar']},
                                      # set headers to mimic a browser
                                      headers=response.headers,
                                      callback=self.parse)
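On Scrapy 1.4 and later, the same listing-and-pagination logic can be written more compactly with response.follow, which resolves relative hrefs against the current page. A minimal sketch under that assumption (same selectors and callback names as above):

    def parse(self, response):
        # sketch: response.follow resolves relative hrefs and builds the Request for us
        for web in response.css('.listchannel'):
            detail_href = web.xpath('a/@href').extract_first()
            if detail_href:
                yield response.follow(detail_href,
                                      meta={'cookiejar': response.meta['cookiejar']},
                                      callback=self.parse_content,
                                      dont_filter=True)

        next_href = response.xpath('//*[@id="paging"]/div/form/a[6]/@href').extract_first()
        if next_href:
            yield response.follow(next_href,
                                  meta={'cookiejar': response.meta['cookiejar']},
                                  callback=self.parse)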


    def parse_content(self, response):
        try:
            name = response.xpath('//*[@id="head"]/h3/a[1]/text()').extract()[0]  # page heading (not stored in the item)

            item = YellowebItem()
            item['link'] = response.xpath('//*[@id="vid"]//@src').extract()[0]
            item['title'] = response.xpath('//*[@id="viewvideo-title"]/text()').extract()[0].strip()
            item['img'] = response.xpath('//*[@id="vid"]/@poster').extract()[0]
            yield item
        except IndexError:
            print("Failed to extract this video's details...")

vi pipelines.py

import pymysql as db


class YellowebPipeline(object):
    def __init__(self):
        self.con = db.connect(user="root", passwd="root", host="localhost",
                              db="python", charset="utf8")
        self.cur = self.con.cursor()
        self.cur.execute('drop table if exists 91pron_content')
        self.cur.execute("create table 91pron_content("
                         "id int auto_increment primary key, "
                         "title varchar(200), img varchar(244), link varchar(244))")

    def process_item(self, item, spider):
        # parameterized insert; id is NULL so MySQL assigns the auto-increment value
        self.cur.execute("insert into 91pron_content(id,title,img,link) values(NULL,%s,%s,%s)",
                         (item['title'], item['img'], item['link']))
        self.con.commit()
        return item
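The connection opened in __init__ is never closed; Scrapy pipelines can hook spider shutdown for that. A small optional addition (my suggestion, not in the original post):

    def close_spider(self, spider):
        # called once when the spider finishes; release the MySQL resources
        self.cur.close()
        self.con.close()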

vi settings.py

DOWNLOADER_MIDDLEWARES = {
    'yelloweb.middlewares.MyCustomDownloaderMiddleware': None,
}
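For YellowebPipeline to receive items it also has to be registered in settings.py; the original post does not show this step, so the exact priority value below is an assumption:

ITEM_PIPELINES = {
    'yelloweb.pipelines.YellowebPipeline': 300,   # any integer 0-1000; lower values run earlier
}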

scrapy crawl webdata   # the crawl command takes the spider's name attribute, not the class name
