"""Run one spider from this Scrapy project.

Usage:
    python run.py [spider_name]

Defaults to 'qicetongspider' when no name is given, preserving the
original behavior of this script.
"""
import sys

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

# Spider names previously tried in this project, kept for reference.
# NOTE: 'gongyehexinxihuabuSpider' likely needs a cookie to work —
# experiment with it (translated from the original author's note).
KNOWN_SPIDERS = (
    'kexujishubuSpider',
    'chacewangSpider',
    'gongyehexinxihuabuSpider',
    'ziranweiyuanhuiSpider',
    'huojuzhognxinSpider',
    'fagaiweiSpider',
    'wenhuahelvyoubuSpider',
    'zhongxiaoqiyejuSpider',
    'cujinjuSpider',
    'shanxishengkejitingSpider',
    'sxsshangwutingSpider',
    'sxgongxintingSpider',
    'sxzonggaishifanquSpider',
    'sxfagaiweiSpider',
    'taiyuankexuejishujuSpider',
    'taiyuangongyehexinxihuajuSpider',
    'taiyuangongshangwujuSpider',
    'qicetongspider',
)

if __name__ == '__main__':
    # Allow the spider name on the command line; fall back to the
    # spider the original script ran, so default behavior is unchanged.
    spider_name = sys.argv[1] if len(sys.argv) > 1 else 'qicetongspider'
    process = CrawlerProcess(get_project_settings())
    process.crawl(spider_name)
    # start() blocks until the crawl finishes.
    process.start()