Python crawler: building a proxy pool with requests

Posted by ckxllf on 2019-12-04

Code shared by a netizen

Crawl proxies from a listing site, validate each one, and append the working proxies to a txt file.

import requests
from scrapy import Selector

# Note: the proxy-site URLs in the original post were truncated when the
# article was republished; fill in the proxy listing site you want to crawl.
start_url = ''       # first page of the proxy list (lost in the source)
url = '{}.html'      # pagination URL template (lost in the source)
# Stand-in validation target (the original test URL was also truncated);
# httpbin.org/ip simply echoes the caller's IP, so any stable page works.
test_url = 'http://httpbin.org/ip'

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}

class MyProxy(object):
    def GetPage(self, url):  # fetch the page source
        response = requests.get(url=url, headers=headers)
        return response.text

    def GetInfo(self, text):  # parse the proxy table on the page
        selector = Selector(text=text)
        FindTable = selector.xpath('//div[@class="layui-form"]/table/tbody/tr')
        for proxy in FindTable:
            ip = "".join(proxy.xpath('.//td[1]/text()').get()).replace('\t', '').replace('\n', '')
            port = "".join(proxy.xpath('.//td[2]/text()').get()).replace('\t', '').replace('\n', '')
            print(ip, port)
            self.TestIP(ip, port)

    def TabPage(self, text):  # read the "next page" link and build its URL
        selector = Selector(text=text)
        page = selector.xpath('//*[@id="layui-laypage-1"]/a[8]/@data-page').get()
        self.new_url = url.format(page)

    def TestIP(self, ip, port):  # validate a proxy by routing a request through it
        try:
            # requests expects scheme-prefixed proxy URLs, e.g. "http://1.2.3.4:8080"
            response = requests.get(url=test_url,
                                    proxies={'http': 'http://{}:{}'.format(ip, port)},
                                    timeout=5)
            print(response.status_code)
            if response.status_code != 200:
                print('request failed')
            else:
                self.file = open('proxy.txt', 'a+')
                self.file.write('{}:{}\n'.format(ip, port))
                self.file.close()
        except Exception:
            print('request failed')

    def close(self):
        self.file.close()

mypoxy = MyProxy()
text = mypoxy.GetPage(start_url)
while True:
    try:
        mypoxy.GetInfo(text)
        mypoxy.TabPage(text)
        text = mypoxy.GetPage(mypoxy.new_url)
    except Exception:
        print('**' * 10)
        break  # stop once pagination runs out
# mypoxy.close()
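Once proxy.txt has collected a few entries, other scripts can consume the pool directly. A minimal sketch, assuming the proxy.txt produced by the script above and using http://httpbin.org/ip as a stand-in test URL (it just echoes the caller's IP):

import random
import requests

# Load the proxies collected above (one "ip:port" per line).
with open('proxy.txt') as f:
    pool = [line.strip() for line in f if line.strip()]

# Pick one at random and route a request through it.
proxy = random.choice(pool)
response = requests.get('http://httpbin.org/ip',
                        proxies={'http': 'http://' + proxy},
                        timeout=5)
print(proxy, '->', response.text)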


From the "ITPUB blog", link: http://blog.itpub.net/69945560/viewspace-2666808/. If reposting, please cite the source; otherwise legal responsibility will be pursued.
