scrapy startproject doubanSpider
cd doubanSpider
scrapy genspider douban douban.com
1.2 创建爬虫
scrapy genspider douban "douban.com"
1.3 添加爬虫模型
import scrapy


class DoubanMovieItem(scrapy.Item):
    """Item holding one entry scraped from a Douban movie list page."""

    title = scrapy.Field()    # movie title
    stars = scrapy.Field()    # rating score
    subject = scrapy.Field()  # subject / short description
1.5 修改爬虫代码,以列表页的多页数据为例
1.5.1 数据为json文档时使用response.json()
import scrapy
from scrapy import Request
from scrapy.http import HtmlResponse

from ssqSpider.items import SsqspiderItem


class SsqSpider(scrapy.Spider):
    """Crawl China Welfare Lottery "shuangseqiu" (ssq) draw results.

    The endpoint returns JSON, so ``parse`` uses ``response.json()``
    instead of CSS/XPath selectors.
    """

    name = "ssq"
    allowed_domains = ["www.cwl.gov.cn"]
    # Not used in practice: start_requests() below overrides the default
    # start_urls handling. Kept here, aligned with allowed_domains.
    start_urls = ["http://www.cwl.gov.cn/"]

    # Paginated JSON draw-notice endpoint; only {page} varies per request.
    API_URL = (
        "http://www.cwl.gov.cn/cwl_admin/front/cwlkj/search/kjxx/findDrawNotice"
        "?name=ssq&issueCount=&issueStart=&issueEnd=&dayStart=&dayEnd="
        "&pageNo={page}&pageSize=30&week=&systemType=PC"
    )

    def start_requests(self):
        """Yield one request per result page (pages 1-54; range is half-open)."""
        for page in range(1, 55):
            # dont_filter=True: the endpoint answers with a 302 redirect, and
            # the redirected request would otherwise be dropped as a duplicate.
            yield Request(url=self.API_URL.format(page=page), dont_filter=True)

    def parse(self, response: HtmlResponse):
        """Map each JSON draw record onto an SsqspiderItem and yield it."""
        data = response.json()
        for record in data["result"]:
            item = SsqspiderItem()
            item["qihao"] = record["code"]               # draw number
            item["riqi"] = record["date"]                # draw date
            item["kaijianghaoma_red"] = record["red"]    # winning red balls
            item["kaijianghaoma_blue"] = record["blue"]  # winning blue ball
            item["jiangchijiner"] = record["poolmoney"]  # prize-pool amount
            item["xiaoshouer"] = record["sales"]         # sales amount
            yield item