前往小程序,Get更优阅读体验!
立即前往
首页
学习
活动
专区
工具
TVP
发布
社区首页 >专栏 >Python-爬取自己博客文章的URL

Python-爬取自己博客文章的URL

作者头像
小小工匠
发布2021-08-16 16:20:44
2300
发布2021-08-16 16:20:44
举报
文章被收录于专栏:小工匠聊架构

Code

代码语言:python
复制
# -*- coding:utf8 -*-
import string
import urllib2
import re
import time
import random

class CSDN_Spider:
    u"""Crawl the paginated article list of a CSDN blog and write every
    article URL (one per line) into <blog-name>.txt in the current directory.

    Usage: CSDN_Spider("http://blog.csdn.net/<name>").csdn()
    """

    # One User-Agent is picked at random per request so the crawler looks
    # less like a bot.  Shared by all requests (the original duplicated
    # this list verbatim in csdn() and find_data()).
    USER_AGENTS = [
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
        'Opera/9.25 (Windows NT 5.1; U; en)',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
        'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
        'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
        'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
        "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
        "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 ",
    ]

    def __init__(self, url):
        # url: the blog home page, e.g. http://blog.csdn.net/<name>
        self.myUrl = url
        self.datas = []  # collected article URLs, each ending with "\n"
        print(u"爬虫已启动....")

    def _fetch(self, url, referer):
        u"""GET *url* with a random User-Agent and return the page decoded
        as UTF-8 text.

        The original decoded in csdn() but forgot to in find_data(); it
        also sent a bogus 'GET' header, which is dropped here ('GET' is a
        request method, not a header field).
        """
        # Local import keeps the class runnable on both Python 2 (urllib2)
        # and Python 3 (urllib.request) without touching the file header.
        try:
            from urllib.request import Request, urlopen
        except ImportError:
            from urllib2 import Request, urlopen
        req = Request(url)
        req.add_header('User-Agent', random.choice(self.USER_AGENTS))
        req.add_header('Host', 'blog.csdn.net')
        req.add_header('Accept', '*/*')
        req.add_header('Referer', referer)
        return urlopen(req).read().decode("utf8")

    def csdn(self):
        u"""Entry point: fetch the listing page, count the pages, crawl them."""
        url = self.myUrl + "?viewmode=list"
        mypage = self._fetch(url, 'http://blog.csdn.net/djd1234567?viewmode=contents')
        Pagenum = self.page_counter(mypage)
        self.find_data(self.myUrl, Pagenum)

    def page_counter(self, mypage):
        u"""Return the number of listing pages, parsed from the 尾页
        ("last page") pager link; 0 when no such link is found."""
        myMatch = re.search(u'/article/list/(\\d+?)">尾页', mypage, re.S)
        if myMatch:
            Pagenum = int(myMatch.group(1))
            print(u"爬虫报告:发现目录一共%d页" % Pagenum)
        else:
            Pagenum = 0
            print(u"爬虫报告:没找到页面的数量")
        return Pagenum

    def find_data(self, myurl, Pagenum):
        u"""Crawl listing pages 1..Pagenum, collect the article ids and
        write the full article URLs to <blog-name>.txt."""
        name = myurl.split("/")[-1]
        # Article anchors look like: " href="/<name>/article/details/<id>" title="
        # NOTE(review): the published source line was garbled beyond parsing;
        # this pattern is reconstructed from its surviving fragments
        # (myurl.split("/")[-1] and '/article/details/(\d+?)" title="') —
        # confirm against the live page HTML.
        pattern = u'" href="/' + name + u'/article/details/(\\d+?)" title="'
        # with-block guarantees the file is closed even when a request
        # raises mid-crawl (the original leaked the handle on error).
        with open(name + '.txt', 'w+') as f:
            for i in range(1, Pagenum + 1):
                print(i)
                print(u"爬虫报告:第%d页正在加载中......" % i)
                url = myurl + "/article/list/" + str(i)
                mypage = self._fetch(url, url)
                myItems = re.findall(pattern, mypage, re.S)
                print(myItems)
                for item in myItems:
                    # Derive the URL from myurl instead of a hard-coded
                    # blog name so the class works for any blog.
                    self.datas.append(myurl + "/article/details/" + item + "\n")
                # time.sleep(1)  # uncomment to throttle requests
            f.writelines(self.datas)
        print(self.datas)
        print(u"爬虫报告:txt文件生成,请在当前目录查看")

# Blog to crawl; its article URLs are written to yangshangwei.txt.
url = "http://blog.csdn.net/yangshangwei"

# Guard the crawl so importing this module does not fire network requests.
if __name__ == "__main__":
    mySpider = CSDN_Spider(url)
    mySpider.csdn()

运行

本文参与 腾讯云自媒体同步曝光计划,分享自作者个人站点/博客。
原始发表:2017/07/12 ,如有侵权请联系 cloudcommunity@tencent.com 删除

本文分享自 作者个人站点/博客 前往查看

如有侵权,请联系 cloudcommunity@tencent.com 删除。

本文参与 腾讯云自媒体同步曝光计划  ,欢迎热爱写作的你一起参与!

评论
登录后参与评论
0 条评论
热度
最新
推荐阅读
目录
  • Code
  • 运行
领券
问题归档专栏文章快讯文章归档关键词归档开发者手册归档开发者手册 Section 归档