settings.py
# Scrapy settings for demo project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = "demo"
SPIDER_MODULES = ["demo.spiders"]
NEWSPIDER_MODULE = "demo.spiders"
# Only output error-level log messages
LOG_LEVEL = "ERROR"
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36"
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
# "Accept-Language": "en",
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# "demo.middlewares.DemoSpiderMiddleware": 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# "demo.middlewares.DemoDownloaderMiddleware": 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# "scrapy.extensions.telnet.TelnetConsole": None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    "demo.pipelines.DemoPipeline": 300,
    "demo.pipelines.MysqlPipeline": 301,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = "httpcache"
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"
# Set settings whose default value is deprecated to a future-proof value
REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
FEED_EXPORT_ENCODING = "utf-8"
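These values apply to the whole project; a single spider can also override them through a custom_settings class attribute. A small illustrative sketch (PoliteSpider and its URL are made up, not part of this project):

# Illustrative only: per-spider overrides of the project-wide settings above.
import scrapy

class PoliteSpider(scrapy.Spider):
    name = "polite_demo"                      # made-up spider name
    start_urls = ["https://example.com/"]     # placeholder URL
    custom_settings = {
        "DOWNLOAD_DELAY": 1,                  # wait 1 s between requests to the same site
        "AUTOTHROTTLE_ENABLED": True,         # let Scrapy adapt the delay automatically
    }

    def parse(self, response):
        pass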
pipelines.py
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class DemoPipeline:
    fp = None

    def open_spider(self, spider):
        # Runs once when the spider starts: open the output file
        self.fp = open('xcf.txt', 'w', encoding='utf-8')
        print('file created')

    def close_spider(self, spider):
        # Runs once when the spider finishes: close the file
        self.fp.close()
        print('file closed')

    def process_item(self, item, spider):
        href = item['href']
        title = item['title']
        pl = item['pl']
        pc = item['pc']
        pe = item['pe']
        # One recipe per line: url, name, ingredients, rating, uploader
        self.fp.write(href + ' ' + title + ' ' + pl + ' ' + pc + ' ' + pe + '\n')
        return item
import pymysql


class MysqlPipeline:
    conn = None
    cursor = None

    def open_spider(self, spider):
        # Connect to MySQL once per crawl instead of at import time
        self.conn = pymysql.Connect(host='127.0.0.1', port=3306, user='root',
                                    password='123456', db='xcf')
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        # Parameterized query; pymysql handles quoting and escaping
        sql = 'insert into txcf values (%s, %s, %s, %s, %s)'
        self.cursor.execute(sql, (item['href'], item['title'], item['pl'],
                                  item['pc'], item['pe']))
        self.conn.commit()
        return item

    def close_spider(self, spider):
        self.cursor.close()
        self.conn.close()
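The MysqlPipeline above assumes a local MySQL server with a database xcf and a five-column table txcf already in place. A one-off setup sketch using the same credentials; the column names and VARCHAR sizes are guesses, only the database name, table name and the five inserted values come from the pipeline:

# Hypothetical create_table.py, run once before the first crawl.
import pymysql

conn = pymysql.Connect(host='127.0.0.1', port=3306, user='root', password='123456')
cursor = conn.cursor()
cursor.execute('CREATE DATABASE IF NOT EXISTS xcf CHARACTER SET utf8mb4')
cursor.execute('USE xcf')
cursor.execute(
    'CREATE TABLE IF NOT EXISTS txcf ('
    'href VARCHAR(255), '    # recipe URL
    'title VARCHAR(255), '   # recipe name
    'pl VARCHAR(1000), '     # ingredients
    'pc VARCHAR(50), '       # rating
    'pe VARCHAR(255))'       # uploader
)
cursor.close()
conn.close()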
items.py
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class DemoItem(scrapy.Item):
    # Fields for one recipe entry
    href = scrapy.Field()   # recipe URL
    title = scrapy.Field()  # recipe name
    pl = scrapy.Field()     # ingredients
    pc = scrapy.Field()     # rating
    # num = scrapy.Field()  # people who tried it in the last 7 days (extracted but not stored)
    pe = scrapy.Field()     # uploader
spider (xcf.py)
import csv
import scrapy
from scrapy import cmdline
from demo.items import DemoItem
class XcfSpider(scrapy.Spider):
    name = "xcf"
    # allowed_domains = ["www.xiachufang.com"]

    # Build the list of category pages 1-19 to crawl
    start_urls = []
    for i in range(1, 20):
        start_url = f"https://www.xiachufang.com/category/40076/?page={i}"
        start_urls.append(start_url)

    # Alternative kept for reference: write items straight to a CSV file
    # def __init__(self, **kwargs):
    #     super().__init__(**kwargs)
    #     # Path of the CSV file
    #     self.csvfile = open('items.csv', mode='w', newline='', encoding='utf-8')
    #     # Create the csv.writer object
    #     self.writer = csv.writer(self.csvfile)
    #     # Write the header row (optional); adjust the column names to your Item structure
    #     self.writer.writerow(['url', 'dish name', 'uploader', 'rating', 'tried in last 7 days', 'ingredients'])

    def parse(self, response):
        li_list = response.xpath('/html/body/div[3]/div/div/div[1]/div[1]/div/div[2]/div[2]/ul/li')
        for li in li_list:
            href = "https://www.xiachufang.com/" + ''.join(li.xpath('./div/a/@href').extract())
            title = ''.join(li.xpath('./div/div/p[1]/a/text()').extract()).strip()   # recipe name
            pl = ' '.join(li.xpath('./div/div/p[2]/a/text()').extract())             # ingredients
            pc = ' '.join(li.xpath('./div/div/p[3]/span[1]/text()').extract())       # rating
            num = ' '.join(li.xpath('./div/div/p[3]/span[2]/text()').extract())      # people who tried it in the last 7 days
            pe = ' '.join(li.xpath('./div/div/p[4]/a/text()').extract())             # uploader

            item = DemoItem()
            item['href'] = href
            item['title'] = title
            item['pl'] = pl
            item['pc'] = pc
            # item['num'] = num
            item['pe'] = pe
            yield item
            # With the CSV alternative above: self.writer.writerow([href, title, pe, pc, num, pl])


# Run from inside an IDE instead of the command line:
# cmdline.execute("scrapy crawl xcf".split())
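The commented-out CSV code in the spider can also be replaced by Scrapy's built-in feed exports. A minimal launcher sketch, assuming a file (the name run.py is arbitrary) placed next to scrapy.cfg:

# Hypothetical run.py: starts the crawl and additionally asks the feed exporter
# to append the yielded items to items.csv, using the FEED_EXPORT_ENCODING set
# in settings.py; the two configured pipelines still run as usual.
from scrapy import cmdline

cmdline.execute("scrapy crawl xcf -o items.csv".split())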
From: https://blog.csdn.net/m0_64188466/article/details/142501821