40. Completing the spider configuration and fixing the renaming of downloaded files/images (Part 1)


This post starts from the previously unconfigured project code and modifies it. The main change is in pipelines.py: adding a custom file pipeline and wiring it into the configuration so that downloaded files are renamed (file downloads are handled first; image downloads work much the same way).
The effect is shown in a screenshot in the original post.

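As a quick orientation before the full project files, this is the heart of the change in isolation: the custom FilesPipeline subclass passes the item along with each download request and builds the stored file name from the item's title instead of the default SHA1 hash of the URL. This is the same MyFilePipeline that appears in pipelines.py further down, reduced to its essentials.

# Minimal sketch of the renaming idea used in pipelines.py below.
from scrapy.pipelines.files import FilesPipeline
import scrapy

class MyFilePipeline(FilesPipeline):
    def get_media_requests(self, item, info):
        for file_url in item['file_urls']:
            # carry the item so file_path() can read its title
            yield scrapy.Request(file_url, meta={'item': item})

    def file_path(self, request, response=None, info=None):
        item = request.meta['item']
        # files end up under FILES_STORE/full/<title>.pdf
        return u'full/{}.pdf'.format(item['title'])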
Spider for the Beijing construction cost information site

bjjs.py

# -*- coding: utf-8 -*-
import scrapy
import re
from bjjs_web.items import BjjsWebItem


class BjjsSpider(scrapy.Spider):
    name = 'bjjs'
    allowed_domains = ['www.bjjs.gov.cn']
    base_url = ['http://www.bjjs.gov.cn']
    start_urls = ['http://www.bjjs.gov.cn/bjjs/gcjs/zczjxx/zjxx/48843934-1.shtml']
    custom_settings = {
        "DOWNLOAD_DELAY": 0.5,
        "ITEM_PIPELINES": {
            'bjjs_web.pipelines.MysqlPipeline': 320,
            'bjjs_web.pipelines.MyFilePipeline': 321,
        },
        "DOWNLOADER_MIDDLEWARES": {
            'bjjs_web.middlewares.BjjsWebDownloaderMiddleware': 500,
        },
    }

    def parse(self, response):
        # tag_list = response.xpath("//div[@class='list_box']/ul/li/a/@href").extract()
        # for tag in tag_list:
        #     url = self.base_url[0] + tag
        #     print(url)
        # print('*' * 100)
        # Regex-match the url, title and date of each list entry.
        # NOTE: the HTML inside the original pattern was stripped when this post
        # was scraped; only the capture groups survive. The pattern captured
        # three groups per entry (href, title, date), unpacked as tag[0..2] below.
        tag_list = re.findall('(.*?)(.*?)', response.text)
        # print(tag_list)
        for tag in tag_list:
            # print(tag)
            item = BjjsWebItem()
            url = self.base_url[0] + tag[0]
            print(url)
            item['file_urls'] = [url]
            title = tag[1]
            print(title)
            item['title'] = title
            time = tag[2]
            print(time)
            item['time'] = time
            print('*' * 100)
            yield item
        # Pagination: read the total number of pages.
        # NOTE: the HTML inside this pattern and the code that followed it
        # (building the next-page requests) were also lost in extraction;
        # only the two capture groups of the pattern survive.
        page_num = re.findall(r'(.*?), (.*?)', response.text)
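Because the exact regular expressions did not survive in this copy of the post, here is an alternative (not the author's code) that extracts the same three fields with XPath, building on the selector the author left commented out (//div[@class='list_box']/ul/li). The child selectors for the title and date are assumptions and should be checked against the live page markup.

# Drop-in replacement sketch for BjjsSpider.parse, assuming each <li> under
# div.list_box holds one <a> (href + title text) and a <span> with the date.
def parse(self, response):
    for li in response.xpath("//div[@class='list_box']/ul/li"):
        item = BjjsWebItem()
        href = li.xpath("./a/@href").extract_first()
        if not href:
            continue
        item['file_urls'] = [self.base_url[0] + href]
        item['title'] = li.xpath("./a/text()").extract_first(default='').strip()
        # assumption: the date sits in a <span> inside the same <li>
        item['time'] = li.xpath("./span/text()").extract_first(default='').strip()
        yield item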
items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class BjjsWebItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    file_urls = scrapy.Field()
    title = scrapy.Field()
    time = scrapy.Field()
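A note on the fields: the stock FilesPipeline reads download URLs from a file_urls field and writes its results into a files field. This project deliberately skips files because MyFilePipeline.item_completed() (below) writes the stored paths back into file_urls instead. For reference, the stock two-field convention would look like this sketch (StockConventionItem is a hypothetical name used only here):

# Stock FilesPipeline field convention, for comparison.
import scrapy

class StockConventionItem(scrapy.Item):
    file_urls = scrapy.Field()   # input: URLs to download
    files = scrapy.Field()       # output: [{'url': ..., 'path': ..., 'checksum': ...}]
    title = scrapy.Field()
    time = scrapy.Field()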
middlewares.py

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html

from scrapy import signals


class BjjsWebSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class BjjsWebDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
pipelines.py

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

# NOTE: scrapy.conf is deprecated and removed in newer Scrapy versions; there,
# read these values from spider.settings (or crawler.settings) instead.
from scrapy.conf import settings
from scrapy.exceptions import DropItem
from scrapy.pipelines.files import FilesPipeline
import pymysql
import scrapy


class BjjsWebPipeline(object):
    def process_item(self, item, spider):
        return item


# Save the scraped data to MySQL
class MysqlPipeline(object):
    def open_spider(self, spider):
        self.host = settings.get('MYSQL_HOST')
        self.port = settings.get('MYSQL_PORT')
        self.user = settings.get('MYSQL_USER')
        self.password = settings.get('MYSQL_PASSWORD')
        self.db = settings.get('MYSQL_DB')
        self.table = settings.get('TABLE')
        self.client = pymysql.connect(host=self.host, user=self.user, password=self.password,
                                      port=self.port, db=self.db, charset='utf8')

    def process_item(self, item, spider):
        item_dict = dict(item)
        cursor = self.client.cursor()
        values = ','.join(['%s'] * len(item_dict))
        keys = ','.join(item_dict.keys())
        sql = 'INSERT INTO {table}({keys}) VALUES ({values})'.format(table=self.table, keys=keys, values=values)
        try:
            # first argument is the SQL statement, second a tuple of values
            if cursor.execute(sql, tuple(item_dict.values())):
                print('Record inserted into the database!')
                self.client.commit()
        except Exception as e:
            print(e)
            print('Record already exists!')
            self.client.rollback()
        return item

    def close_spider(self, spider):
        self.client.close()


# Custom download pipeline: rename each downloaded file to the item title
class MyFilePipeline(FilesPipeline):
    def get_media_requests(self, item, info):
        for file_url in item['file_urls']:
            yield scrapy.Request(file_url, meta={'item': item})

    def file_path(self, request, response=None, info=None):
        item = request.meta['item']
        filename = u'full/{}.pdf'.format(item['title'])
        return filename

    def item_completed(self, results, item, info):
        file_paths = [x['path'] for ok, x in results if ok]
        if not file_paths:
            raise DropItem("Item contains no file_urls")
        item['file_urls'] = file_paths
        return item
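One caveat with naming downloads after the title: titles that repeat, or that contain characters such as "/", will overwrite each other or fail to save. An optional guard, not in the original post, is to sanitize the title and append a short hash of the URL, roughly like the sketch below; _safe_name is a hypothetical helper added here purely for illustration.

import hashlib
import re

def _safe_name(title, url):
    # strip characters that are illegal or risky in file names
    cleaned = re.sub(r'[\\/:*?"<>|\s]+', '_', title).strip('_')
    # a short hash of the URL keeps items with identical titles apart
    suffix = hashlib.md5(url.encode('utf-8')).hexdigest()[:8]
    return u'full/{}_{}.pdf'.format(cleaned, suffix)

# possible use inside MyFilePipeline.file_path():
#     return _safe_name(item['title'], request.url)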
settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for bjjs_web project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'bjjs_web'

SPIDER_MODULES = ['bjjs_web.spiders']
NEWSPIDER_MODULE = 'bjjs_web.spiders'

# Root directory for files saved by the files pipeline
FILES_STORE = './download'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'bjjs_web (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# MySQL connection settings
MYSQL_HOST = "172.16.10.157"
MYSQL_PORT = 3306
MYSQL_USER = "root"
MYSQL_PASSWORD = "123456"
MYSQL_DB = 'web_datas'
TABLE = "web_bjjs"

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'bjjs_web.middlewares.BjjsWebSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'bjjs_web.middlewares.BjjsWebDownloaderMiddleware': 543,
}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# NOTE: the spider's custom_settings override this dict at runtime, which is
# where MysqlPipeline and MyFilePipeline are actually enabled. The module path
# below would need to read 'bjjs_web.pipelines.MyFilePipeline' to take effect here.
ITEM_PIPELINES = {
    # file download pipeline
    'scrapy.pipelines.MyFilePipeline': 1,
    'bjjs_web.pipelines.BjjsWebPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
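With these files in place the crawl is started with `scrapy crawl bjjs` from the project root. Assuming the settings above, the PDFs land under ./download/full/ with the title-based names and each record is inserted into the web_bjjs table. The script below is not part of the original post; it is just a common convenience wrapper for starting the spider from Python.

# run.py -- hypothetical helper, equivalent to running `scrapy crawl bjjs`
# from the project root.
from scrapy.cmdline import execute

execute(['scrapy', 'crawl', 'bjjs'])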

     

Posted on 2018-10-24 11:32

Reposted from: https://www.cnblogs.com/lvjing/p/9842608.html
