import logging

import pika
from crochet import setup
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from scrapy.utils.project import get_project_settings

# Import the spider we wrote ourselves
from mars.spiders.myspider import Spider

logger = logging.getLogger(__name__)

# Reconnection loop: keep consuming until a non-recoverable error occurs.
# (The connection/channel setup is omitted from this excerpt; see the
# connect() sketch below.)
while True:
    try:
        logger.info(' [*] Waiting for messages. To exit press CTRL+C')
        # Start RabbitMQ consuming mode; this call blocks, and each message's
        # callback does not return until the spider has finished its task
        channel.start_consuming()
    # Handle connection-closed errors by continuing the loop, which reconnects
    except pika.exceptions.ConnectionClosed:
        # Uncomment this to make the example not attempt recovery
        # from server-initiated connection closure, including
        # when the node is stopped cleanly
        #
        # break
        continue
    # Do not recover on channel errors
    except pika.exceptions.AMQPChannelError as err:
        print("Caught a channel error: {}, stopping...".format(err))
        break
    # Recover on all other connection errors
    except pika.exceptions.AMQPConnectionError:
        print("Connection was closed, retrying...")
        continue
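The imports bring in crochet and CrawlerRunner, but the excerpt never shows the consumer callback that actually launches the crawl. Below is a minimal sketch of how these pieces are typically wired together; the crochet.wait_for timeout, the run_spider and callback names, and the assumption that each message body is a URL accepted by Spider as a url keyword are all illustrative, not from the original.

from crochet import wait_for  # alongside setup, imported above

setup()  # run the Twisted reactor in a background thread
configure_logging()
runner = CrawlerRunner(get_project_settings())

@wait_for(timeout=3600)  # block the caller until the crawl's Deferred fires
def run_spider(url):
    return runner.crawl(Spider, url=url)

def callback(ch, method, properties, body):
    # Assumes each message body is a UTF-8 URL for the spider to crawl
    url = body.decode('utf-8')
    logger.info(' [x] Received %s', url)
    run_spider(url)  # blocks here until the spider finishes
    # Ack only after the crawl completes, so an interrupted job is redelivered
    ch.basic_ack(delivery_tag=method.delivery_tag)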
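The while True loop above also assumes a channel variable. Here is a sketch of that setup, under the assumption of a local broker and a queue named scrapy_queue (the host, queue name, and heartbeat value are placeholders, not from the original):

def connect():
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host='localhost', heartbeat=600))
    channel = connection.channel()
    channel.queue_declare(queue='scrapy_queue', durable=True)
    # Deliver one message at a time, since each crawl is long-running
    channel.basic_qos(prefetch_count=1)
    channel.basic_consume(queue='scrapy_queue', on_message_callback=callback)
    return channel

Inside the try block, channel = connect() would precede channel.start_consuming(), so each pass of the loop re-establishes the connection after a recoverable failure.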