
Scraping the Detailed Information of Individual Listing Pages for Lianjia Beijing Second-Hand Housing


1. Environment Setup

(1) Install Anaconda:

  Download: https://www.anaconda.com/products/individual

(2) Install Scrapy

(3) Install PyCharm

(4) Install the XPath Helper browser extension

  Reference tutorials:

    Getting the extension: https://blog.csdn.net/weixin_41010318/article/details/86472643

    Configuration: https://www.cnblogs.com/pfeiliu/p/13483562.html

2. Scraping Process

(1) Page analysis:

  URLs:

    Each listing detail page is reached via a listing-title link under https://bj.lianjia.com/ershoufang/;

    The previous crawl collected the link of every listing page and saved it to a JSON file, so those URLs can now be read back one by one to crawl every detail page (the assumed file format is sketched below);
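
    The spider below reads the URLs back from that JSON file. Judging from how the file is consumed in the code (the parsed list is iterated and each entry's BeiJingFangYuan_Id field holds one listing URL), the file presumably looks roughly like the following sketch; the concrete listing URLs are made up for illustration:

[
  {"BeiJingFangYuan_Id": "https://bj.lianjia.com/ershoufang/101101234567.html"},
  {"BeiJingFangYuan_Id": "https://bj.lianjia.com/ershoufang/101107654321.html"}
]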

    

  Use XPath Helper to work out the XPath expressions for the fields to scrape (they can be verified as in the snippet below):
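
  Before wiring an expression into the spider, it can be double-checked outside Scrapy. A minimal sketch (not from the original post) using scrapy.Selector on a made-up HTML fragment that mimics the "base" attribute block of a listing page:

from scrapy import Selector

# Made-up stand-in for part of a listing page (illustration only)
sample_html = '''
<div class="base">
  <ul>
    <li><span>房屋户型</span>2室1厅1厨1卫</li>
    <li><span>所在楼层</span>中楼层 (共18层)</li>
  </ul>
</div>
'''

sel = Selector(text=sample_html)
# Same expression style as used in parse_details() further below
print(sel.xpath('//div[@class="base"]//li[1]/text()').extract())  # ['2室1厅1厨1卫']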

  

  

  

  

(2) Full code:

  Project layout:
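
  Reconstructed from the commands below and the file paths referenced in the code, the project presumably ends up laid out roughly like this:

LJbasicInformation/
    scrapy.cfg
    run.py
    url_json/
        max1_beijing.json
    LJbasicInformation/
        __init__.py
        items.py
        middlewares.py
        pipelines.py
        settings.py
        spiders/
            __init__.py
            lianjiaBj.py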

  

Create the project:

# Open PyCharm, open a Terminal, and run the following commands

scrapy startproject LJbasicInformation
cd LJbasicInformation
scrapy genspider lianjiaBj bj.lianjia.com

 

In the same directory as scrapy.cfg, create run.py to launch the Scrapy project; its contents are as follows:

# run.py, created in the project root
from scrapy.cmdline import execute
# The third argument is the spider name
# '--nolog' can be appended to suppress Scrapy's log output (see the example below)
execute(['scrapy', 'crawl', 'lianjiaBj'])
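
To suppress Scrapy's console log, the --nolog switch mentioned in the comment can simply be appended to the argument list:

execute(['scrapy', 'crawl', 'lianjiaBj', '--nolog'])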

 

lianjiaBj.py

# -*- coding: utf-8 -*-
import scrapy
from LJbasicInformation.items import LjbasicinformationItem
from selenium.webdriver import ChromeOptions
from selenium.webdriver import Chrome
import json

# Load the listing URLs saved earlier to max1_beijing.json (one such JSON file per city)
with open('./url_json/max1_beijing.json', 'r') as file:
    json_str = file.read()
    data = json.loads(json_str)


class LianjiabjSpider(scrapy.Spider):
    name = 'lianjiaBj'
    # allowed_domains = ['bj.lianjia.com']
    start_urls = data

    # Instantiate a headless browser object
    def __init__(self):
        # Prevent the site from detecting the Selenium-driven browser
        # Selenium startup options are passed via a ChromeOptions instance:
        options = ChromeOptions()
        # Startup flags
        # Headless: no visible window (on Linux without a display, omitting this makes startup fail)
        options.add_argument("--headless")  # => configure Chrome for headless mode
        # Experimental options
        # Start in "developer mode", in which the webdriver property keeps a normal value
        options.add_experimental_option('excludeSwitches', ['enable-automation'])
        options.add_experimental_option('useAutomationExtension', False)
        self.browser = Chrome(options=options)
        # Current workaround for navigator.webdriver being reported as true
        self.browser.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
            "source": """
                                   Object.defineProperty(navigator, 'webdriver', {
                                     get: () => undefined
                                   })
                                 """
        })

        super().__init__()

    def start_requests(self):
        print("Starting crawl")
        url = self.start_urls
        print("Start URLs loaded")
        print(type(url))
        count = 0
        # Walk through the URLs loaded from max1_beijing.json one by one
        for i in url:
            count += 1
            alone_url = i['BeiJingFangYuan_Id']  # the listing URL as a string
            # print(alone_url)

            # print("url", url)
            # Request the listing page; self.parse_details handles the response
            response = scrapy.Request(url=alone_url, callback=self.parse_details)
            yield response
        print("Requests scheduled this run: %s" % count)

        # A Request object represents one HTTP request; it is usually created in the
        # spider and executed by the downloader, producing a Response.
        # url (string)        - the URL of this request
        # callback (callable) - called with this request's response (once downloaded)
        #                       as its first argument.
        # If a request specifies no callback, the spider's parse() method is used;
        # if an exception is raised during processing, errback is called instead.
        # Other parameters: method='GET', headers, body, cookies, meta, encoding='utf-8',
        #                   priority=0, dont_filter=False, errback
        # response = scrapy.Request(url=url, callback=self.parse_index)      #
        # yield response

    # Close the browser once the whole crawl has finished
    def close(self, spider):
        print("Closing crawler")
        self.browser.quit()

    ''' # Visit the index page and get the response for the listing section
    def parse_index(self, response):
        print("Visiting index page")

        for i in range(0, 2970):
            # URL of the next listing page
            url = url[i]['BeiJingFangYuan_Id']
            i += 1
            print("url", url)
            # Request the listing page; self.parse_details handles the response
            yield scrapy.Request(url=url, callback=self.parse_details)'''

    def parse_details(self, response):
        try:
            # # Basic attributes
            # Unit layout: //div[@class="base"]//li[1]/text()
            fangWuHuXing = response.xpath('//div[@class="base"]//li[1]/text()').extract()

            # Gross floor area: //div[@class="base"]//li[3]/text()
            jianZhuMianJi = response.xpath('//div[@class="base"]//li[3]/text()').extract()

            # Interior (usable) area: //div[@class="base"]//li[5]/text()
            taoNeiMianJi = response.xpath('//div[@class="base"]//li[5]/text()').extract()

            # Orientation: //div[@class="base"]//li[7]/text()
            fangWuChaoXiang = response.xpath('//div[@class="base"]//li[7]/text()').extract()

            # Decoration status: //div[@class="base"]//li[9]/text()
            zhuangXiuQingKuang = response.xpath('//div[@class="base"]//li[9]/text()').extract()

            # Heating type: //div[@class="base"]//li[11]/text()
            gongNuanFangShi = response.xpath('//div[@class="base"]//li[11]/text()').extract()

            # Floor: //div[@class="base"]//li[2]/text()
            suoZaiLouCeng = response.xpath('//div[@class="base"]//li[2]/text()').extract()

            # Layout structure: //div[@class="base"]//li[4]/text()
            huXingJieGou = response.xpath('//div[@class="base"]//li[4]/text()').extract()

            # Building type: //div[@class="base"]//li[6]/text()
            jianZhuLeiXing = response.xpath('//div[@class="base"]//li[6]/text()').extract()

            # Building structure: //div[@class="base"]//li[8]/text()
            jianZhuJieGou = response.xpath('//div[@class="base"]//li[8]/text()').extract()

            # Elevator-to-household ratio: //div[@class="base"]//li[10]/text()
            tiHuBiLi = response.xpath('//div[@class="base"]//li[10]/text()').extract()

            # Elevator available: //div[@class="base"]//li[12]/text()
            peiBeiDianTi = response.xpath('//div[@class="base"]//li[12]/text()').extract()


            # # Transaction attributes
            # Listing date: //div[@class="transaction"]//li[1]/span[2]/text()
            guaPaiShiJian = response.xpath('//div[@class="transaction"]//li[1]/span[2]/text()').extract()

            # Last transaction: //div[@class="transaction"]//li[3]/span[2]/text()
            shangCiJiaoYi = response.xpath('//div[@class="transaction"]//li[3]/span[2]/text()').extract()

            # Years of ownership: //div[@class="transaction"]//li[5]/span[2]/text()
            fangWuNianXian = response.xpath('//div[@class="transaction"]//li[5]/span[2]/text()').extract()

            # Mortgage information: //div[@class="transaction"]//li[7]/span[2]/text()
            diYaXinXi = response.xpath('//div[@class="transaction"]//li[7]/span[2]/text()').extract()

            # Transaction ownership: //div[@class="transaction"]//li[2]/span[2]/text()
            jiaoYiQuanShu = response.xpath('//div[@class="transaction"]//li[2]/span[2]/text()').extract()

            # Property usage: //div[@class="transaction"]//li[4]/span[2]/text()
            fangWuYongTu = response.xpath('//div[@class="transaction"]//li[4]/span[2]/text()').extract()

            # Ownership attribution: //div[@class="transaction"]//li[6]/span[2]/text()
            chanQuanSuoShu = response.xpath('//div[@class="transaction"]//li[6]/span[2]/text()').extract()

            # Property certificate status: //div[@class="transaction"]//li[8]/span[2]/text()
            fangBenBeiJian = response.xpath('//div[@class="transaction"]//li[8]/span[2]/text()').extract()

            # # Listing highlights
            # Key selling points: //div[@class="introContent showbasemore"]//div[2]/div[@class="content"]/text()
            heXinMaiDian = response.xpath('//div[@class="introContent showbasemore"]//div[2]/div[@class="content"]/text()').extract()

            # Community description: //div[@class="introContent showbasemore"]//div[3]/div[@class="content"]/text()
            xiaoQuJieShao = response.xpath('//div[@class="introContent showbasemore"]//div[3]/div[@class="content"]/text()').extract()

            # Layout description: //div[@class="introContent showbasemore"]//div[4]/div[@class="content"]/text()
            huXingJieShao = response.xpath('//div[@class="introContent showbasemore"]//div[4]/div[@class="content"]/text()').extract()

            # Surrounding amenities: //div[@class="introContent showbasemore"]//div[5]/div[@class="content"]/text()
            zhouBianPeiTao = response.xpath('//div[@class="introContent showbasemore"]//div[5]/div[@class="content"]/text()').extract()


            # # Listing title: //h1[@class="main"]/text()
            fangYuanBiaoTi = response.xpath('//h1[@class="main"]/text()').extract()


            # # Price information
            # Total price: //div[@class="content"]/div[3]/span[1]/text()
            fangYuanZongJie = response.xpath('//div[@class="content"]/div[3]/span[1]/text()').extract()

            # Price per square metre: //span[@class="unitPriceValue"]/text()
            pingFangJiaGe = response.xpath('//span[@class="unitPriceValue"]/text()').extract()

            # Year built: //div[@class="subInfo noHidden"]/text()[1]
            jianChengShiJian = response.xpath('//div[@class="subInfo noHidden"]/text()[1]').extract()


            # # Property information
            # Community name: //div[@class="communityName"]/a[1]/text()
            xiaoQuMingCheng = response.xpath('//div[@class="communityName"]/a[1]/text()').extract()

            # District / area
            # //div[@class="areaName"]/span[2]/a[1]/text()
            suoZaiQuYu1 = response.xpath('//div[@class="areaName"]/span[2]/a[1]/text()').extract()
            # //div[@class="areaName"]/span[2]/a[2]/text()
            suoZaiQuYu2 = response.xpath('//div[@class="areaName"]/span[2]/a[2]/text()').extract()
            # //div[@class="areaName"]/span[2]/text()
            suoZaiQuYu3 = response.xpath('//div[@class="areaName"]/span[2]/text()').extract()

            # Viewing time: //div[@class="visitTime"]/span[2]/text()
            kanFangShiJian = response.xpath('//div[@class="visitTime"]/span[2]/text()').extract()

            # Lianjia listing ID: //div[@class="houseRecord"]/span[2]/text()
            lianJiaBianHao = response.xpath('//div[@class="houseRecord"]/span[2]/text()').extract()



            # item
            item = LjbasicinformationItem()
            # Basic attributes
            item['fangWuHuXing'] = fangWuHuXing
            item['jianZhuMianJi'] = jianZhuMianJi
            item['taoNeiMianJi'] = taoNeiMianJi
            item['fangWuChaoXiang'] = fangWuChaoXiang
            item['zhuangXiuQingKuang'] = zhuangXiuQingKuang
            item['gongNuanFangShi'] = gongNuanFangShi
            item['suoZaiLouCeng'] = suoZaiLouCeng
            item['huXingJieGou'] = huXingJieGou
            item['jianZhuLeiXing'] = jianZhuLeiXing
            item['jianZhuJieGou'] = jianZhuJieGou
            item['tiHuBiLi'] = tiHuBiLi
            item['peiBeiDianTi'] = peiBeiDianTi

            # Transaction attributes
            item['guaPaiShiJian'] = guaPaiShiJian
            item['shangCiJiaoYi'] = shangCiJiaoYi
            item['fangWuNianXian'] = fangWuNianXian
            item['diYaXinXi'] = diYaXinXi
            item['jiaoYiQuanShu'] = jiaoYiQuanShu
            item['fangWuYongTu'] = fangWuYongTu
            item['chanQuanSuoShu'] = chanQuanSuoShu
            item['fangBenBeiJian'] = fangBenBeiJian

            # Listing highlights
            item['heXinMaiDian'] = heXinMaiDian
            item['xiaoQuJieShao'] = xiaoQuJieShao
            item['huXingJieShao'] = huXingJieShao
            item['zhouBianPeiTao'] = zhouBianPeiTao

            # Listing title
            item['fangYuanBiaoTi'] = fangYuanBiaoTi

            # Price information
            item['fangYuanZongJie'] = fangYuanZongJie
            item['pingFangJiaGe'] = pingFangJiaGe
            item['jianChengShiJian'] = jianChengShiJian

            # Property information
            item['xiaoQuMingCheng'] = xiaoQuMingCheng
            item['suoZaiQuYu1'] = suoZaiQuYu1
            item['suoZaiQuYu2'] = suoZaiQuYu2
            item['suoZaiQuYu3'] = suoZaiQuYu3
            item['kanFangShiJian'] = kanFangShiJian
            item['lianJiaBianHao'] = lianJiaBianHao

            yield item
        except Exception as e:
            print(e)
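Note that as written, start_requests() yields ordinary scrapy.Request objects, so the headless Chrome created in __init__ is only prepared, never used to fetch pages; Scrapy's own downloader handles every request. Wiring the browser in would normally take a downloader middleware along these lines (a minimal sketch under that assumption, not code from the original post), registered under DOWNLOADER_MIDDLEWARES in settings.py:

# middlewares.py -- minimal Selenium downloader middleware sketch (assumption, not from the original post)
from scrapy.http import HtmlResponse

class SeleniumDownloaderMiddleware:
    def process_request(self, request, spider):
        # Fetch the page with the spider's headless browser
        spider.browser.get(request.url)
        # Returning a Response here short-circuits Scrapy's normal download
        return HtmlResponse(url=request.url,
                            body=spider.browser.page_source,
                            encoding='utf-8',
                            request=request)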

 

items.py

import scrapy

class LjbasicinformationItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    # # Basic attributes
    # Unit layout
    fangWuHuXing = scrapy.Field()
    # Gross floor area
    jianZhuMianJi = scrapy.Field()
    # Interior (usable) area
    taoNeiMianJi = scrapy.Field()
    # Orientation
    fangWuChaoXiang = scrapy.Field()
    # Decoration status
    zhuangXiuQingKuang = scrapy.Field()
    # Heating type
    gongNuanFangShi = scrapy.Field()
    # Floor
    suoZaiLouCeng = scrapy.Field()
    # Layout structure
    huXingJieGou = scrapy.Field()
    # Building type
    jianZhuLeiXing = scrapy.Field()
    # Building structure
    jianZhuJieGou = scrapy.Field()
    # Elevator-to-household ratio
    tiHuBiLi = scrapy.Field()
    # Elevator available
    peiBeiDianTi = scrapy.Field()

    # # Transaction attributes
    # Listing date
    guaPaiShiJian = scrapy.Field()
    # Last transaction
    shangCiJiaoYi = scrapy.Field()
    # Years of ownership
    fangWuNianXian = scrapy.Field()
    # Mortgage information
    diYaXinXi = scrapy.Field()
    # Transaction ownership
    jiaoYiQuanShu = scrapy.Field()
    # Property usage
    fangWuYongTu = scrapy.Field()
    # Ownership attribution
    chanQuanSuoShu = scrapy.Field()
    # Property certificate status
    fangBenBeiJian = scrapy.Field()

    # # Listing highlights
    # Key selling points
    heXinMaiDian = scrapy.Field()
    # Community description
    xiaoQuJieShao = scrapy.Field()
    # Layout description
    huXingJieShao = scrapy.Field()
    # Surrounding amenities
    zhouBianPeiTao = scrapy.Field()

    # # Listing title
    fangYuanBiaoTi = scrapy.Field()

    # # Price information
    # Total price
    fangYuanZongJie = scrapy.Field()
    # Price per square metre
    pingFangJiaGe = scrapy.Field()
    # Year built
    jianChengShiJian = scrapy.Field()

    # # Property information
    # Community name
    xiaoQuMingCheng = scrapy.Field()
    # District / area
    suoZaiQuYu1 = scrapy.Field()
    suoZaiQuYu2 = scrapy.Field()
    suoZaiQuYu3 = scrapy.Field()
    # Viewing time
    kanFangShiJian = scrapy.Field()
    # Lianjia listing ID
    lianJiaBianHao = scrapy.Field()

 

pipelines.py

import codecs, json

class LjbasicinformationPipeline(object):
    def __init__(self):
        # Under Python 3 the file must be opened in text mode ('w') to write JSON
        # self.f = open("max_BJ_information.json", 'wb')
        self.file = codecs.open('fangyuan_BJ_information.json', 'w', encoding="utf-8")


    def process_item(self, item, spider):
        # Serialize the item's data to JSON and append a line break
        content = json.dumps(dict(item), ensure_ascii=False) + ',\n'
        self.file.write(content)

        return item

    def close_spider(self, spider):
        # Close the file
        self.file.close()
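Because every line ends with a comma, the resulting file is a stream of JSON objects rather than one valid JSON document. If one object per line is acceptable, Scrapy's built-in JsonLinesItemExporter produces the same kind of output without the trailing commas; a minimal alternative sketch (not part of the original post):

from scrapy.exporters import JsonLinesItemExporter

class LjbasicinformationJsonLinesPipeline(object):
    # Alternative pipeline sketch: one valid JSON object per line (JSON Lines)
    def open_spider(self, spider):
        self.file = open('fangyuan_BJ_information.jl', 'wb')  # exporters expect a binary file
        self.exporter = JsonLinesItemExporter(self.file, encoding='utf-8', ensure_ascii=False)
        self.exporter.start_exporting()

    def process_item(self, item, spider):
        self.exporter.export_item(item)
        return item

    def close_spider(self, spider):
        self.exporter.finish_exporting()
        self.file.close()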

 

Modify settings.py to enable the pipeline:

# Scrapy settings for LJbasicInformation project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'LJbasicInformation'

SPIDER_MODULES = ['LJbasicInformation.spiders']
NEWSPIDER_MODULE = 'LJbasicInformation.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'LJbasicInformation (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'LJbasicInformation.middlewares.LjbasicinformationSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'LJbasicInformation.middlewares.LjbasicinformationDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'LJbasicInformation.pipelines.LjbasicinformationPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
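Apart from ROBOTSTXT_OBEY = False, the only change from the generated defaults is the ITEM_PIPELINES block. For a crawl over thousands of detail pages it may also be worth adding a download delay and a browser-like User-Agent; an optional suggestion, not part of the original configuration:

# Optional politeness settings (suggestion, not in the original post)
DOWNLOAD_DELAY = 1
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36'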

 

Run run.py to start the crawler; the scraped records are appended to fangyuan_BJ_information.json, as illustrated below.
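
Each record is written as one JSON object per line (followed by a comma), and every value is a list of strings because the spider uses .extract() rather than .extract_first(). An illustrative fragment with made-up values:

{"fangWuHuXing": ["2室1厅1厨1卫"], "jianZhuMianJi": ["89.5㎡"], "fangWuChaoXiang": ["南 北"], ...},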
