newsspider/decspider/middlewares.py

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
from .myutils import ProxyPool
from .settings import USERNAME, PASSWORD
from faker import Faker


class DecspiderSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)


class DecspiderDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)


class ProxyMiddleware:
    def __init__(self):
        # Initialize the proxy pool and a failure counter for every proxy in it.
        self.proxy_pool = ProxyPool()
        self.proxy_failures = {proxy: 0 for proxy in self.proxy_pool.proxy_list}
        self.fake = Faker()
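
    # ProxyPool (from .myutils) is assumed here to expose a `proxy_list`
    # iterable of "host:port" strings, a `get_one()` method that returns a
    # single proxy, and a `remove(proxy)` method, since those are the only
    # attributes this middleware relies on.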

    def process_request(self, request, spider):
        # Pick a random proxy from the pool for every outgoing request.
        proxy = self.proxy_pool.get_one()
        if proxy not in self.proxy_failures:
            self.proxy_failures[proxy] = 0
        request.meta['proxy'] = "http://%(user)s:%(pwd)s@%(proxy)s/" % {"user": USERNAME, "pwd": PASSWORD, "proxy": proxy}
        # Rotate the User-Agent as well.
        ua = self.fake.user_agent()
        request.headers['User-Agent'] = ua
        spider.logger.info(f'Using proxy: {proxy}\nUsing UA: {ua}')

    def process_response(self, request, response, spider):
        # If the response looks fine, let it through.
        if response.status in [200, 301, 302]:
            return response
        # Otherwise count a failure against the proxy that was used
        else:
            self._handle_proxy_failure(request.meta['proxy'], spider)
            # and reschedule the request.
            return request

    def process_exception(self, request, exception, spider):
        # A download error occurred: count a failure against the proxy
        self._handle_proxy_failure(request.meta['proxy'], spider)
        # and reschedule the request.
        return request
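
    # The proxy URL stored in request.meta has the form
    # "http://<user>:<password>@<host>:<port>/", so stripping everything up to
    # the last "@" and the trailing "/" recovers the bare "host:port" key used
    # in self.proxy_failures.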

    def _handle_proxy_failure(self, http_proxy, spider):
        # Increment the failure counter for the proxy behind this proxy URL.
        proxy = http_proxy.split('@')[-1][:-1]
        self.proxy_failures[proxy] += 1
        spider.logger.error(f'Proxy {proxy} failed, failure count: {self.proxy_failures[proxy]}')
        # Once a proxy has failed twice, drop it from the pool.
        if self.proxy_failures[proxy] >= 2:
            self.proxy_pool.remove(proxy)
            del self.proxy_failures[proxy]
            spider.logger.error(f'Removed proxy {proxy} after consecutive failures.')

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
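

# A minimal sketch of how ProxyMiddleware could be enabled in the project's
# settings.py, assuming the package name decspider and the usual template
# priority of 543:
#
# DOWNLOADER_MIDDLEWARES = {
#     "decspider.middlewares.ProxyMiddleware": 543,
# }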