# Filesystem path where the post was ultimately stored.
# NOTE(review): these are trailing Field declarations of a scrapy.Item
# subclass whose class header is above this chunk — confirm against the
# full items.py.
son_path = scrapy.Field()
# Name of the spider that produced this item — presumably set by the
# spider/pipeline; verify against the crawler code.
spider = scrapy.Field()
# URL the item was scraped from.
url = scrapy.Field()
crawled...ExamplePipeline(object):
def process_item(self, item, spider):
# 当前爬取的时间
item["crawled...insert into sina_items(parent_url,parent_title,sub_title,sub_url,sub_file_name,son_url,head,content,crawled...sub_url"], item["sub_file_name"],
item["son_url"], item["head"], item["content"], item["crawled