http://github.com/cwjokaka/ok_ip_proxy_pool/commit/843d95212f856489baff4ba687930612eebc7a50.diff

diff --git a/main.py b/main.py
index e43b383..c52cd62 100644
--- a/main.py
+++ b/main.py
@@ -1,18 +1,15 @@
-import asyncio
 import typing
-from asyncio import AbstractEventLoop
 
 from src.database.memory_db import db_collection
 from src.entity.proxy_entity import ProxyEntity
 from src.spider.spiders import spider_collection
 from setting import SPIDER_LIST, DB_CONFIG
-from threading import Thread
 
 
-def crawl(event_loop: AbstractEventLoop):
+def crawl():
     proxies = []
     for spider_name in SPIDER_LIST:
-        proxies.extend(spider_collection[spider_name].crawl(event_loop))
+        proxies.extend(spider_collection[spider_name].crawl())
     return proxies
 
 
@@ -22,21 +19,12 @@ def save(proxies: typing.List[ProxyEntity]):
         db.set(f'{proxy.ip}:{proxy.port}', proxy)
 
 
-def start_event_loop(loop):
-    def init_loop(_loop):
-        asyncio.set_event_loop(_loop)
-        _loop.run_forever()
-    loop_thread = Thread(target=init_loop, args=(loop,))
-    loop_thread.setDaemon(True)
-    loop_thread.start()
 
 
-if __name__ == '__main__':
-    new_loop = asyncio.new_event_loop()
-    start_event_loop(new_loop)
+if __name__ == '__main__':
+    proxies = crawl()
     # crawl
-    proxies = crawl(new_loop)
+    save(proxies)
     # persist
-    # save(proxies)
     print()
diff --git a/setting.py b/setting.py
index 6537df5..922aa15 100644
--- a/setting.py
+++ b/setting.py
@@ -7,8 +7,8 @@
 SPIDER_LIST = [
     'Spider66Ip',
-    # 'SpiderQuanWangIp',
-    # 'SpiderXiciIp'
+    'SpiderQuanWangIp',
+    'SpiderXiciIp'
 ]
 
 # spider request headers
diff --git a/src/spider/abs_spider.py b/src/spider/abs_spider.py
index 52a0fc4..8752501 100644
--- a/src/spider/abs_spider.py
+++ b/src/spider/abs_spider.py
@@ -1,7 +1,4 @@
-import asyncio
-from asyncio import AbstractEventLoop
-from collections import Coroutine
-from typing import List, Iterable
+from typing import List
 
 from src.entity.proxy_entity import ProxyEntity
 
@@ -11,17 +8,14 @@ class AbsSpider(object):
     def __init__(self, name='unknown') -> None:
         self._name = name
 
-    def crawl(self, event_loop: AbstractEventLoop):
+    def crawl(self):
         print(f'{self._name} started crawling...')
-        # self.do_crawl()
-        # print(type(self.do_crawl()))
-        # print(isinstance(self.do_crawl(), Coroutine))
-        self.do_crawl(event_loop)
-        # print(f'{self._name} finished crawling! Got {len(res)} proxies in total')
+        res = self.do_crawl()
+        print(f'{self._name} finished crawling! Got {len(res)} proxies in total')
         # todo: persist to the database
-        # return ful.result()
+        return res
 
-    def do_crawl(self, event_loop) -> Iterable[ProxyEntity]:
-        raise NotImplementedError
+    def do_crawl(self) -> List[ProxyEntity]:
+        raise RuntimeError('do_crawl is not implemented!')
diff --git a/src/spider/spiders.py b/src/spider/spiders.py
index 1436f78..099dc63 100644
--- a/src/spider/spiders.py
+++ b/src/spider/spiders.py
@@ -1,11 +1,6 @@
-import asyncio
-from asyncio import AbstractEventLoop
-from collections import Coroutine
-from typing import List, Iterable
+from typing import List
 
-import aiohttp
-# import requests
-from aiohttp import ClientSession
+import requests
 
 from setting import HEADERS
 from src.entity.proxy_entity import ProxyEntity
@@ -31,49 +26,31 @@ class Spider66Ip(AbsSpider):
     """
     def __init__(self) -> None:
         super().__init__('66IP proxy spider')
-        self._base_url = 'http://www.66ip.cn/{}'
+        self._base_url = 'http://www.66ip.cn'
 
-    def do_crawl(self, event_loop: AbstractEventLoop) -> Iterable[ProxyEntity]:
-        tasks = []
+    def do_crawl(self) -> List[ProxyEntity]:
+        result = []
         for page in range(1, 5):
-            url = self._base_url.format(page)
-            task = asyncio.ensure_future(self._request(url))
-            tasks.append(task)
-        # for page in range(1, 5):
-        #     task = asyncio.ensure_future(hello(self._base_url.format(page)))
-
-        event_loop.call_soon_threadsafe(tasks)
-
-        # for page in range(1, 5):
-        #     # print(f'page {page}...')
-        #     resp = requests.get(f'{self._base_url}/{page}.html')
-        #     async with aiohttp.ClientSession() as session:
-        #         async with session.get(f'{self._base_url}/{page}.html') as resp:
-        #             # await aiohttp
-        #             resp.encoding = 'gb2312'
-        #             soup = BeautifulSoup(await resp.text(), 'lxml')
-        #             tr_list = soup.find('table', attrs={'width': '100%', 'bordercolor': '#6699ff'}).find_all('tr')
-        #             for i, tr in enumerate(tr_list):
-        #                 if i == 0:
-        #                     continue
-        #                 contents = tr.contents
-        #                 ip = contents[0].text
-        #                 port = contents[1].text
-        #                 region = contents[2].text
-        #                 proxy_cover = contents[3].text
-        #                 # check_time = contents[4].text
-        #                 # print(f'{ip}:{port}/{region}/{proxy_type}/{check_time}')
-        #                 yield ProxyEntity(ip, port,
-        #                                   source=self._name,
-        #                                   proxy_cover=self._judge_proxy_cover(proxy_cover),
-        #                                   region=region)
-
-
-    async def _request(self, url):
-        async with ClientSession() as session:
-            async with session.get(url) as response:
-                response = await response.read()
-                print(response)
+            # print(f'page {page}...')
+            resp = requests.get(f'{self._base_url}/{page}.html')
+            resp.encoding = 'gb2312'
+            soup = BeautifulSoup(resp.text, 'lxml')
+            tr_list = soup.find('table', attrs={'width': '100%', 'bordercolor': '#6699ff'}).find_all('tr')
+            for i, tr in enumerate(tr_list):
+                if i == 0:
+                    continue
+                contents = tr.contents
+                ip = contents[0].text
+                port = contents[1].text
+                region = contents[2].text
+                proxy_cover = contents[3].text
+                # check_time = contents[4].text
+                # print(f'{ip}:{port}/{region}/{proxy_type}/{check_time}')
+                result.append(ProxyEntity(ip, port,
+                                          source=self._name,
+                                          proxy_cover=self._judge_proxy_cover(proxy_cover),
+                                          region=region))
+        return result
 
     def _judge_proxy_cover(self, cover_str: str):
@@ -83,130 +60,132 @@ def _judge_proxy_cover(self, cover_str: str):
         return ProxyCoverEnum.UNKNOWN
 
 
-# @spider_register
-# class SpiderQuanWangIp(AbsSpider):
-#     """
-#     QuanWang IP proxy spider. Refresh rate: very fast
-#     http://www.goubanjia.com/
-#     """
-#     def __init__(self) -> None:
-#         super().__init__('QuanWang IP proxy spider')
-#         self._base_url = 'http://www.goubanjia.com'
-#
-#     async def do_crawl(self) -> Iterable[ProxyEntity]:
-#         resp = requests.get(self._base_url, headers=HEADERS)
-#         soup = BeautifulSoup(resp.text, 'lxml')
-#         # print(soup.prettify())
-#         tr_list = soup.find('tbody').find_all('tr')
-#         for i, tr in enumerate(tr_list):
-#             tds = tr.find_all('td')
-#             id_and_port = tds[0]
-#             ip, port = self._parse_ip_and_port(id_and_port)
-#             proxy_cover = tds[1].text
-#             proxy_type = tds[2].text
-#             region = tds[3].contents[1].text
-#             supplier= tds[4].text
-#             # resp_speed = tds[5].text[:-2]
-#             # last_check_time = tds[6]
-#             # ttl = tds[7]
-#             yield ProxyEntity(ip, port,
-#                               source=self._name,
-#                               supplier=supplier,
-#                               proxy_type=self._judge_proxy_type(proxy_type),
-#                               proxy_cover=self._judge_proxy_cover(proxy_cover),
-#                               region=region
-#                               )
-#
-#
-#     def _parse_ip_and_port(self, ip_td: Tag):
-#
-#         res = []
-#         contents = ip_td.find_all(['div', 'span'])
-#         # print(len(contents))
-#         for content in contents:
-#             # print(content)
-#             res.append(content.text)
-#         res.pop()
-#         ip = ''.join(res)
-#
-#         port_tag = contents[-1]
-#         port_ori_str = port_tag.get('class')[1]
-#         # decode the real port
-#         port = 0
-#         for c in port_ori_str:
-#             port *= 10
-#             port += (ord(c) - ord('A'))
-#         port /= 8
-#         port = int(port)
-#         print(f'ip:{ip}, port:{port}')
-#         return ip, str(port)
-#
-#     def _judge_proxy_type(self, type_str: str):
-#         type_low = type_str.lower()
-#         if type_low == 'http':
-#             return ProxyTypeEnum.HTTP
-#         elif type_low == 'https':
-#             return ProxyTypeEnum.HTTPS
-#         else:
-#             return ProxyTypeEnum.UNKNOWN
-#
-#     def _judge_proxy_cover(self, cover_str: str):
-#         if cover_str == '透明':
-#             return ProxyCoverEnum.TRANSPARENT
-#         elif cover_str == '高匿':
-#             return ProxyCoverEnum.HIGH_COVER
-#         else:
-#             return ProxyCoverEnum.UNKNOWN
-#
-#
-# @spider_register
-# class SpiderXiciIp(AbsSpider):
-#     """
-#     Xici proxy spider. Refresh rate: 🐌 slow
-#     https://www.xicidaili.com/
-#     """
-#     def __init__(self) -> None:
-#         super().__init__('Xici IP proxy spider')
-#         self._base_urls = [
-#             'https://www.xicidaili.com/nn',  # high anonymity
-#             'https://www.xicidaili.com/nt'   # transparent
-#         ]
-#
-#     async def do_crawl(self) -> Iterable[ProxyEntity]:
-#         for base_url in self._base_urls:
-#             for page in range(1, 4):
-#                 res = requests.get(f'{base_url}/{page}', headers=HEADERS)
-#                 soup = BeautifulSoup(res.text, 'lxml')
-#                 tr_list = soup.find('table', attrs={'id': 'ip_list'}).find_all('tr')[1: -1]
-#                 for tr in tr_list:
-#                     tds = tr.find_all('td')
-#                     # country = tds[0].find('img')['alt']
-#                     ip = tds[1].text
-#                     port = tds[2].text
-#                     city = tds[3].text.replace('\n', '')
-#                     proxy_cover = tds[4].text
-#                     proxy_type = tds[5].text
-#                     yield ProxyEntity(ip, port,
-#                                       source=self._name,
-#                                       proxy_cover=self._judge_proxy_cover(proxy_cover),
-#                                       proxy_type=self._judge_proxy_type(proxy_type),
-#                                       )
-#
-#     def _judge_proxy_cover(self, cover_str: str):
-#         if cover_str == '高匿':
-#             return ProxyCoverEnum.HIGH_COVER
-#         if cover_str == '透明':
-#             return ProxyCoverEnum.TRANSPARENT
-#         else:
-#             return ProxyCoverEnum.UNKNOWN
-#
-#     def _judge_proxy_type(self, type_str: str):
-#         if type_str == 'HTTPS':
-#             return ProxyTypeEnum.HTTPS
-#         if type_str == 'HTTP':
-#             return ProxyTypeEnum.HTTP
-#         else:
-#             return ProxyTypeEnum.UNKNOWN
-
-# if __name__ == '__main__':
-#     print(isinstance(Spider66Ip().do_crawl(), Coroutine))
\ No newline at end of file
+@spider_register
+class SpiderQuanWangIp(AbsSpider):
+    """
+    QuanWang IP proxy spider. Refresh rate: very fast
+    http://www.goubanjia.com/
+    """
+    def __init__(self) -> None:
+        super().__init__('QuanWang IP proxy spider')
+        self._base_url = 'http://www.goubanjia.com'
+
+    def do_crawl(self) -> List[ProxyEntity]:
+        result = []
+        resp = requests.get(self._base_url, headers=HEADERS)
+        soup = BeautifulSoup(resp.text, 'lxml')
+        # print(soup.prettify())
+        tr_list = soup.find('tbody').find_all('tr')
+        for i, tr in enumerate(tr_list):
+            tds = tr.find_all('td')
+            id_and_port = tds[0]
+            ip, port = self._parse_ip_and_port(id_and_port)
+            proxy_cover = tds[1].text
+            proxy_type = tds[2].text
+            region = tds[3].contents[1].text
+            supplier= tds[4].text
+            # resp_speed = tds[5].text[:-2]
+            # last_check_time = tds[6]
+            # ttl = tds[7]
+            result.append(ProxyEntity(ip, port,
+                                      source=self._name,
+                                      supplier=supplier,
+                                      proxy_type=self._judge_proxy_type(proxy_type),
+                                      proxy_cover=self._judge_proxy_cover(proxy_cover),
+                                      region=region
+                                      )
+                          )
+        return result
+
+
+    def _parse_ip_and_port(self, ip_td: Tag):
+
+        res = []
+        contents = ip_td.find_all(['div', 'span'])
+        # print(len(contents))
+        for content in contents:
+            # print(content)
+            res.append(content.text)
+        res.pop()
+        ip = ''.join(res)
+
+        port_tag = contents[-1]
+        port_ori_str = port_tag.get('class')[1]
+        # decode the real port
+        port = 0
+        for c in port_ori_str:
+            port *= 10
+            port += (ord(c) - ord('A'))
+        port /= 8
+        port = int(port)
+        print(f'ip:{ip}, port:{port}')
+        return ip, str(port)
+
+    def _judge_proxy_type(self, type_str: str):
+        type_low = type_str.lower()
+        if type_low == 'http':
+            return ProxyTypeEnum.HTTP
+        elif type_low == 'https':
+            return ProxyTypeEnum.HTTPS
+        else:
+            return ProxyTypeEnum.UNKNOWN
+
+    def _judge_proxy_cover(self, cover_str: str):
+        if cover_str == '透明':
+            return ProxyCoverEnum.TRANSPARENT
+        elif cover_str == '高匿':
+            return ProxyCoverEnum.HIGH_COVER
+        else:
+            return ProxyCoverEnum.UNKNOWN
+
+
+@spider_register
+class SpiderXiciIp(AbsSpider):
+    """
+    Xici proxy spider. Refresh rate: 🐌 slow
+    https://www.xicidaili.com/
+    """
+    def __init__(self) -> None:
+        super().__init__('Xici IP proxy spider')
+        self._base_urls = [
+            'https://www.xicidaili.com/nn',  # high anonymity
+            'https://www.xicidaili.com/nt'   # transparent
+        ]
+
+    def do_crawl(self) -> List[ProxyEntity]:
+        result = []
+        for base_url in self._base_urls:
+            for page in range(1, 4):
+                res = requests.get(f'{base_url}/{page}', headers=HEADERS)
+                soup = BeautifulSoup(res.text, 'lxml')
+                tr_list = soup.find('table', attrs={'id': 'ip_list'}).find_all('tr')[1: -1]
+                for tr in tr_list:
+                    tds = tr.find_all('td')
+                    # country = tds[0].find('img')['alt']
+                    ip = tds[1].text
+                    port = tds[2].text
+                    city = tds[3].text.replace('\n', '')
+                    proxy_cover = tds[4].text
+                    proxy_type = tds[5].text
+                    result.append(ProxyEntity(ip, port,
+                                              source=self._name,
+                                              proxy_cover=self._judge_proxy_cover(proxy_cover),
+                                              proxy_type=self._judge_proxy_type(proxy_type),
+                                              ))
+        return result
+
+    def _judge_proxy_cover(self, cover_str: str):
+        if cover_str == '高匿':
+            return ProxyCoverEnum.HIGH_COVER
+        if cover_str == '透明':
+            return ProxyCoverEnum.TRANSPARENT
+        else:
+            return ProxyCoverEnum.UNKNOWN
+
+    def _judge_proxy_type(self, type_str: str):
+        if type_str == 'HTTPS':
+            return ProxyTypeEnum.HTTPS
+        if type_str == 'HTTP':
+            return ProxyTypeEnum.HTTP
+        else:
+            return ProxyTypeEnum.UNKNOWN
\ No newline at end of file
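
The refactored main.crawl() looks spiders up by class name in spider_collection, and each spider class carries the @spider_register decorator; the registry itself is defined outside this diff. A minimal sketch of the pattern these names imply (the implementation below is an assumption for illustration, not the project's actual code):

# Assumed sketch of the registry implied by @spider_register / spider_collection.
# The real definitions are not part of this diff.
spider_collection = {}


def spider_register(cls):
    # Map the class name (the strings listed in SPIDER_LIST) to a ready-made
    # instance, so main.crawl() can call spider_collection[name].crawl().
    spider_collection[cls.__name__] = cls()
    return cls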
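
The deleted asyncio path never awaited its requests: loop.call_soon_threadsafe() expects a callable, but the old Spider66Ip.do_crawl() handed it the list of task objects, and AbsSpider.crawl() returned nothing, so there was nothing to persist. This commit drops the event loop in favour of synchronous requests calls. For comparison only, a conventional way to drive such coroutines (not what the commit does) would be:

import asyncio

import aiohttp


async def fetch(url: str) -> str:
    # one page per coroutine
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            return await response.text()


async def fetch_all(urls):
    # schedule all coroutines concurrently and wait for every result
    return await asyncio.gather(*(fetch(u) for u in urls))

# pages = asyncio.run(fetch_all([f'http://www.66ip.cn/{p}.html' for p in range(1, 5)]))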
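
SpiderQuanWangIp._parse_ip_and_port() decodes goubanjia's obfuscated port from the last tag's second CSS class: each letter stands for one decimal digit ('A' = 0, 'B' = 1, ...), the digits are accumulated base 10, and the resulting number is eight times the real port. A standalone illustration of the same arithmetic (the class string 'GEGEA' is an invented example, not real site data):

def decode_port(encoded: str) -> int:
    # 'A' -> 0, 'B' -> 1, ...: rebuild the decimal number, then divide by 8,
    # mirroring the loop in _parse_ip_and_port.
    value = 0
    for ch in encoded:
        value = value * 10 + (ord(ch) - ord('A'))
    return value // 8


# 'GEGEA' -> digits 6 4 6 4 0 -> 64640 -> 64640 // 8 == 8080
assert decode_port('GEGEA') == 8080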
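
After this change, adding a source means subclassing AbsSpider, returning a List[ProxyEntity] from do_crawl(), registering the class, and listing its name in SPIDER_LIST. A hypothetical spider written against that interface (the site URL, table layout and the decorator's import path are assumptions; ProxyEntity's signature is inferred from the calls in the diff):

import requests
from bs4 import BeautifulSoup

from setting import HEADERS
from src.entity.proxy_entity import ProxyEntity
from src.spider.abs_spider import AbsSpider
from src.spider.spiders import spider_register  # assumed import path


@spider_register
class SpiderExampleIp(AbsSpider):
    """Hypothetical example source, not part of the project."""

    def __init__(self) -> None:
        super().__init__('Example IP proxy spider')
        self._base_url = 'http://proxylist.example.com'  # invented URL

    def do_crawl(self):
        result = []
        resp = requests.get(self._base_url, headers=HEADERS)
        soup = BeautifulSoup(resp.text, 'lxml')
        for tr in soup.find_all('tr')[1:]:  # skip the header row
            tds = tr.find_all('td')
            result.append(ProxyEntity(tds[0].text, tds[1].text, source=self._name))
        return result

SPIDER_LIST in setting.py would then gain the entry 'SpiderExampleIp'.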







