1. Create a project
scrapy startproject <project_name>
2. Enter the project directory
cd <project_name>
3. Create a spider
scrapy genspider <name> <domain>
4. You may need to modify start_urls, changing it to the page you want to scrape.
Inside the spiders directory you will find a Python file named after the spider you created.
For example: scrapy genspider xiao www.4399.com
creates xiao.py under spiders,
and start_urls is where the starting page URL goes.
5. Parse the data inside the spider's parse(response) method:
def parse(self, response):  # handles parsing by default
    response.text    # page source
    response.xpath()
    response.css()
When parsing, note that xpath() returns Selector objects by default;
to get the actual data you must call extract() (see the sketch just below).
    extract() returns a list
    extract_first() returns a single value
yield the data -> hands it to the pipeline for persistent storage
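A minimal sketch of the difference (the //a/text() XPath is just a placeholder):
titles = response.xpath('//a/text()')  # SelectorList of Selector objects
titles.extract()         # list of strings; .getall() is the modern alias
titles.extract_first()   # first string, or None if empty; .get() is the modern alias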
6. Store the data in the pipeline
# Remember: pipelines are disabled by default; you must enable them in settings
class <ClassName>:
    # processes each item; item: the data, spider: the spider that produced it
    def process_item(self, item, spider):
        return item  # must return something, or the next pipeline receives no data
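For actual persistence, a pipeline can also open a file when the spider starts and close it when it finishes. A sketch (the class name and file name are made up; register it in ITEM_PIPELINES like any other pipeline):
import json

class JsonLinesPipeline:
    # called once when the spider opens; a good place to acquire resources
    def open_spider(self, spider):
        self.f = open('data.jsonl', 'w', encoding='utf-8')

    # called once when the spider closes; release resources here
    def close_spider(self, spider):
        self.f.close()

    def process_item(self, item, spider):
        # one JSON object per line; ensure_ascii=False keeps Chinese readable
        self.f.write(json.dumps(item, ensure_ascii=False) + '\n')
        return item  # pass the item on to the next pipeline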
7. Enable the pipeline in settings.py
ITEM_PIPELINES = {
    # key: the import path of the pipeline
    # value: its priority (the lower the number, the higher the priority)
    "<pipeline path>": <priority>,
    # e.g. "game.pipelines.GamePipeline": 300,
}
# Add the line below to settings.py and logs below WARNING will no longer appear
LOG_LEVEL = 'WARNING'  # only warnings and above are shown
8. Run the spider
scrapy crawl <spider_name>
e.g.: scrapy crawl xiao
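If you would rather launch the spider from a Python script than from the command line, Scrapy's CrawlerProcess can do it. A minimal sketch (a hypothetical run.py), assuming it is run from the project root, where scrapy.cfg lives, so get_project_settings() can find the project settings:
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

process = CrawlerProcess(get_project_settings())
process.crawl('xiao')  # the spider's name attribute
process.start()        # blocks until the crawl finishes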
xiao.py
import scrapy


class XiaoSpider(scrapy.Spider):
    name = "xiao"  # spider name
    allowed_domains = ["www.4399.com"]  # allowed domains
    start_urls = ["https://www.4399.com/"]  # starting page URL

    def parse(self, response):  # handles parsing by default
        # print(response)
        # page source:
        # print(response.text)
        # grab every game name on the page in one shot:
        # txt = response.xpath('//*[@id="skinbody"]/div[10]/div[1]/div[1]/ul/li/a/text()').extract()
        # print(txt)
        # extract the data block by block instead
        li_list = response.xpath('//*[@id="skinbody"]/div[10]/div[1]/div[1]/ul/li')
        for li in li_list:
            # indexing with [0] to get a string raises an error when the element is empty:
            # name = li.xpath('./a/text()').extract()[0]
            # extract_first() returns one item, or None if there is nothing:
            name = li.xpath('./a/text()').extract_first()
            # print(name)
            # break  # for testing
            dic = {
                'name': name
            }
            # yield hands the data to the pipelines
            yield dic
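If the games spanned multiple pages, parse() could also yield a follow-up request. A sketch; the next-page XPath here is hypothetical:
next_href = response.xpath('//a[@class="next"]/@href').extract_first()
if next_href:
    # response.follow resolves relative URLs and reuses this callback
    yield response.follow(next_href, callback=self.parse)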
pipelines.py
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


# Remember: pipelines are disabled by default; enable them in settings
class GamePipeline:
    # processes each item; item: the data, spider: the spider that produced it
    def process_item(self, item, spider):
        print(item)
        print(spider.name)
        return item


class NewPipeline:
    # adds a field to each item before passing it along
    def process_item(self, item, spider):
        item['love'] = '陶喆'
        return item
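Note how this interacts with the priorities set in settings.py below: NewPipeline runs at 299 and GamePipeline at 300, so NewPipeline goes first and every item GamePipeline prints already carries the 'love' field.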
settings.py
# Scrapy settings for game project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = "game"
SPIDER_MODULES = ["game.spiders"]
NEWSPIDER_MODULE = "game.spiders"
LOG_LEVEL = 'WARNING'  # only warnings and above are shown
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = "game (+http://www.yourdomain.com)"
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
# "Accept-Language": "en",
# }
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# "game.middlewares.GameSpiderMiddleware": 543,
# }
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# "game.middlewares.GameDownloaderMiddleware": 543,
# }
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# "scrapy.extensions.telnet.TelnetConsole": None,
# }
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    # key: the import path of the pipeline
    # value: its priority
    "game.pipelines.GamePipeline": 300,
    "game.pipelines.NewPipeline": 299,  # lower number = higher priority
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = "httpcache"
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"
# Set settings whose default value is deprecated to a future-proof value
REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
FEED_EXPORT_ENCODING = "utf-8"
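Incidentally, FEED_EXPORT_ENCODING = "utf-8" above also governs feed exports: instead of (or alongside) a pipeline, yielded items can be dumped straight to a file from the command line (the file name games.json is arbitrary):
scrapy crawl xiao -o games.json  # append exported items to games.json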
From: https://www.cnblogs.com/Wesuiliye/p/17190014.html