Add the following code to middlewares.py:
import random

from scrapy import signals


class ProxyMiddleware(object):
    def process_request(self, request, spider):
        # ip = random.choice(self.ip)
        # Scrapy's built-in HttpProxyMiddleware reads the lowercase 'proxy' key
        request.meta['proxy'] = "http://" + "58.246.58.150:9002"
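Before wiring the proxy into Scrapy, it is worth checking by hand that it actually forwards traffic. A minimal sketch using requests (httpbin.org/ip simply echoes the caller's IP; swap in your own proxy address):

import requests

# If the proxy works, this prints the proxy's IP instead of your own
print(requests.get("http://httpbin.org/ip",
                   proxies={"http": "http://58.246.58.150:9002"},
                   timeout=10).text)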
Then register the custom class under the downloader middlewares in settings.py:
DOWNLOADER_MIDDLEWARES = {
    'myproject.middlewares.ProxyMiddleware': 543,
}
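If you would rather keep the proxy list in the settings file (which is what the commented-out from_crawler hook further below assumes), a minimal sketch could look like this. Note that PROXIES is a name of our own choosing, not a built-in Scrapy setting:

# settings.py -- PROXIES is a custom setting, not built into Scrapy
PROXIES = [
    "58.246.58.150:9002",
    # add more "host:port" entries here
]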
Next, write the following code in the spider file (the proxy IP, set in the middleware above, can be whatever you like). The spider requests an IP-echo page a few times, so the output shows which IP each request went out through:
import scrapy

from test1.items import Scrapydemo9Item


class BoleSpider(scrapy.Spider):
    name = 'phone'
    # allowed_domains = ['www.jihaoba.com']
    allowed_domains = []

    def start_requests(self):
        # An IP-echo page: the response body contains the requester's IP
        url = 'http://www.whatismyip.com.tw/'
        for i in range(4):
            yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)

    def parse(self, response):
        print(response.text)
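Run the spider from the project root; if the middleware is active, the printed page body should contain the proxy's IP rather than your own:

scrapy crawl phone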
Finally, here is a version of the middleware that pulls proxy IPs from an IP pool. It assumes a proxy-pool HTTP service is listening on 127.0.0.1:5010 and exposes /count/, /get/ and /delete/ endpoints:
import json
import random

import requests


class ProxyMiddleware(object):
    # def __init__(self, ip):
    #     self.ip = "58.246.58.150:9002"

    # @classmethod
    # def from_crawler(cls, crawler):
    #     return cls(ip=crawler.settings.get('PROXIES'))
    user_agents = [
        # Opera
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60",
        "Opera/8.0 (Windows NT 5.1; U; en)",
        "Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50",
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50",
        # Firefox
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0",
        "Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10",
        # Safari
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2",
        # Chrome
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
        "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16",
    ]
    # Fetch every proxy from the pool, keep only the ones that actually
    # forward traffic, and tell the pool to delete the dead ones.
    def getIp(self):
        total = requests.get("http://127.0.0.1:5010/count/").json().get("count").get("total")
        print("proxies in pool:", total)
        candidates = []
        for i in range(total):
            proxy = requests.get("http://127.0.0.1:5010/get/").json().get("proxy")
            candidates.append(proxy)
        usable = []
        for c in candidates:
            this_proxy = {
                "http": "http://{}".format(c),
                "https": "https://{}".format(c),
            }
            print(this_proxy)
            try:
                this_ip = c.split(":")[0]  # the IP part of "host:port"
                res = requests.get("http://httpbin.org/get?show_env=1", timeout=60, proxies=this_proxy)
                origin = json.loads(res.text)['origin']
                # httpbin echoes the IP it sees; if that is the proxy's IP,
                # the proxy really is the one making the request
                if origin == this_ip:
                    print("usable:", origin)
                    usable.append(c)
                else:
                    requests.get("http://127.0.0.1:5010/delete/?proxy={}".format(c))
            except Exception:
                print(c + " failed, deleting it from the pool")
                requests.get("http://127.0.0.1:5010/delete/?proxy={}".format(c))
        return usable
    def process_request(self, request, spider):
        # ip = random.choice(self.ip)
        proxies = self.getIp()
        # Pick one validated proxy at random for this request
        request.meta['proxy'] = "http://" + random.choice(proxies)
        # request.headers['User-Agent'] = random.choice(self.user_agents)
        request.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36'
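One caveat: calling getIp() inside process_request re-validates the entire pool for every single request, which is very slow. A minimal sketch of one way around that, caching the validated list and refreshing it only when it runs dry (the subclass and the valid_proxies attribute are our own additions, not part of the original code):

class CachedProxyMiddleware(ProxyMiddleware):
    # valid_proxies is a hypothetical cache; getIp() is only called
    # when the cache is empty, not on every request
    valid_proxies = []

    def process_request(self, request, spider):
        if not self.valid_proxies:
            self.valid_proxies = self.getIp()
        request.meta['proxy'] = "http://" + random.choice(self.valid_proxies)
        request.headers['User-Agent'] = random.choice(self.user_agents)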