练手实例:Scrapy爬取一本完整小说
小说地址:https://www.biquge.info/34_34370/
整体都很简单,没啥多说的,10分钟搞定
外循环在主页面找url进行拼接,小循环解析详细页内容提取小说文本。
biquge.py
import scrapy
from scrapy.selector import Selector
class BiqugeSpider(scrapy.Spider):
    """Crawl an entire novel from biquge.info.

    `parse` walks the chapter index page and yields one Request per chapter;
    `parse_detail` extracts the chapter's body text and yields it as an item
    for the pipeline to persist.
    """
    name = 'biquge'
    allowed_domains = ['biquge.info']
    start_urls = ['https://www.biquge.info/34_34370/']

    def parse(self, response):
        """Yield a Request for every chapter linked from the index page."""
        # A Scrapy Response supports .xpath() directly; wrapping it in a
        # separate Selector (as the original did) is redundant.
        # Each <dd> under the chapter list holds one chapter link.
        chapters = response.xpath(
            "/html/body/div[@id='wrapper']/div[@class='box_con'][2]"
            "/div[@id='list']/dl/dd")
        for chapter in chapters:
            href = chapter.xpath('.//@href').extract_first()
            if not href:
                # Skip malformed <dd> entries instead of building a
                # request from None.
                continue
            # urljoin resolves the relative chapter href against the
            # index-page URL.
            yield scrapy.Request(response.urljoin(href),
                                 callback=self.parse_detail)

    def parse_detail(self, response):
        """Extract the chapter text and yield it for the pipeline."""
        # Debug print of every chapter (and the commented-out test loop)
        # removed — the pipeline is responsible for output.
        paragraphs = response.xpath('//div[@id="content"]/text()').extract()
        yield {'content': '\n'.join(paragraphs)}
class XiaoshuoPipeline(object):
    """Item pipeline that appends each scraped chapter's text to one file."""

    def __init__(self):
        # Open once in append mode so successive chapters accumulate.
        self.file = open('D://女帝直播攻略.txt', 'a+', encoding='utf-8')

    def process_item(self, item, spider):
        """Write the chapter content and pass the item along unchanged."""
        self.file.write(item['content'])
        return item

    def close_spider(self, spider):
        # Fix: the original never closed the handle, leaking the file
        # descriptor and risking unflushed data. Scrapy calls this hook
        # automatically when the spider finishes.
        self.file.close()
# settings.py — keep the log quiet (warnings and errors only) and send a
# desktop-browser User-Agent so the site serves normal pages to the crawler.
LOG_LEVEL = 'WARNING'
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'
结果
真的不得不佩服scrapy的速度,太猛了,上了个厕所就爬完了一千七百多章。。