scrapy 入门
1. 创建一个 scrapy 项目：scrapy startproject myspider
2. 生成一个爬虫：scrapy genspider itcast itcast.cn
3. 提取数据：完善 spider，使用 xpath 等方法
4. 保存数据：在 pipeline 中保存数据
解释
class ItcastSpider(scrapy.Spider):
    """Annotated spider skeleton: the three required attributes plus a parse callback."""
    name = 'itcast'  # unique spider name, used as `scrapy crawl itcast`
    allowed_domains = ['itcast.cn']  # domains the spider is allowed to crawl
    # NOTE(review): the original note used https://maoyan.com/board here, which is
    # inconsistent with allowed_domains above; aligned with the itcast.py example.
    start_urls = ['https://www.itcast.cn/channel/teacher.shtml']  # first URLs requested

    def parse(self, response):
        # Handles the response for each start_urls request.
        # extract() returns every matched text node as a list of strings.
        ret1 = response.xpath("//div[@class='maincon']//h2/text()").extract()
        print(ret1)
extract解释:提取所有的文字信息。
操作命令
cd 文件名：进入到项目目录内。tree：查看目录树。scrapy crawl itcast：启动爬虫。dir：查看当前目录中的文件和文件夹。
学习进度
itcast.py文件
①
import scrapy
class ItcastSpider(scrapy.Spider):
    """Version ①: print the list of all <h2> text nodes from the teacher page."""

    name = 'itcast'
    allowed_domains = ['itcast.cn']
    start_urls = ['https://www.itcast.cn/channel/teacher.shtml']

    def parse(self, response):
        # Callback invoked with the response for each URL in start_urls.
        teacher_names = response.xpath("//div[@class='maincon']//h2/text()").extract()
        print(teacher_names)
②
import scrapy
class ItcastSpider(scrapy.Spider):
    """Version ②: build one dict per teacher (name + title) from the listing page."""

    name = 'itcast'
    allowed_domains = ['itcast.cn']
    start_urls = ['https://www.itcast.cn/channel/teacher.shtml']

    def parse(self, response):
        # Group by <li>: each list item under the main container holds one teacher.
        li_list = response.xpath("//div[@class='maincon']//li")
        for li in li_list:
            item = {}
            # extract_first() returns None instead of raising IndexError when the
            # node is missing — more robust than the original extract()[0].
            item["name"] = li.xpath(".//h2/text()").extract_first()
            item["title"] = li.xpath(".//span/text()").extract_first()
            print(item)