高考派 (GaoKaoPai) university ranking crawler
import json
import ssl
import pymysql
from urllib import request, parse

class GaoKaoPaiSpider(object):
    def __init__(self):
        # Database connection
        self.client = pymysql.Connect(
            host='127.0.0.1', user='root',
            password='ljh1314', database='gaokaopai',
            port=3306, charset='utf8'
        )
        # Create a cursor
        self.cursor = self.client.cursor()
        # Default starting value for the ranking position
        self.rank = 1
    def send_request(self, form=None, headers=None):
        """Send the POST request and fetch the response body."""
        form_data = parse.urlencode(form).encode('utf-8')
        if not headers:
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
            }
        url = 'http://www.gaokaopai.com/rank-index.html'
        req = request.Request(url=url, headers=headers, data=form_data)
        # Disable certificate verification for the request
        ssl_context = ssl._create_unverified_context()
        response = request.urlopen(req, context=ssl_context)
        html = response.read().decode('utf-8')
        # has_next: whether the next page should be requested
        has_next = self.parse_response(html)
        # Fetch the next page
        if has_next:
            form['start'] = form['start'] + 25
            self.send_request(form=form)
        else:
            print('All data fetched')
    def parse_response(self, html):
        # If '!DOCTYPE HTML' appears in the response, the server returned an
        # HTML page rather than a JSON string; in this case that means the
        # data has all been fetched.
        if '!DOCTYPE HTML' in html:
            return False
        # json.loads: the argument must be a JSON string
        json_data = json.loads(html)
        jobs = json_data['data']['ranks']
        for job in jobs:
            job_info = {}
            job_info['top'] = self.rank
            job_info['title'] = job['uni_name']
            job_info['zongfen'] = self.get_default_num(data=job['xiao_total'], isFloat=True)
            job_info['class'] = job['uni_type']
            job_info['dizhi'] = job['city_code']
            job_info['pici'] = '本科一批'  # admission batch: first-tier undergraduate
            self.rank += 1
            print(job_info)
            self.save_data_to_mysql(job_info)
        # A non-empty ranks list means a next page may exist, so return True
        # to keep requesting; an empty list means we are done.
        return len(jobs) > 0
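    # A sketch of the JSON payload parse_response expects, inferred purely
    # from the keys accessed above; the values are illustrative placeholders,
    # not real data from the site:
    #
    # {
    #     "data": {
    #         "ranks": [
    #             {
    #                 "uni_name": "...",
    #                 "xiao_total": "...",
    #                 "uni_type": "...",
    #                 "city_code": "..."
    #             }
    #         ]
    #     }
    # }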
    def get_default_num(self, data=None, default=0, isFloat=False):
        """
        :param data: the raw value to convert
        :param default: fallback value used when data is empty
        :param isFloat: whether the result should be a float
        :return: data (or default) as a float or an int
        """
        if data:
            if isFloat:
                return float(data)
            return int(data)
        if isFloat:
            return float(default)
        return default
    def save_data_to_mysql(self, job_info):
        """Insert one record into MySQL."""
        insert_sql = """
        INSERT INTO dsa(%s)
        VALUES (%s)
        """ % (
            ','.join(job_info.keys()),
            ','.join(['%s'] * len(job_info)),
        )
        try:
            self.cursor.execute(insert_sql, list(job_info.values()))
            self.client.commit()
            print('Insert succeeded')
        except Exception as err:
            self.client.rollback()
            print(err)
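# The post never shows the definition of the `dsa` table. A minimal schema
# matching the fields written by save_data_to_mysql might look like this;
# the column types are assumptions, not taken from the original:
#
#   CREATE TABLE IF NOT EXISTS dsa (
#       top     INT,
#       title   VARCHAR(100),
#       zongfen FLOAT,
#       class   VARCHAR(50),
#       dizhi   VARCHAR(50),
#       pici    VARCHAR(50)
#   ) DEFAULT CHARSET=utf8;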
if __name__ == '__main__':
    spider = GaoKaoPaiSpider()
    # POST form parameters for the ranking request:
    #   otype: 2
    #   city: 0
    #   cate: 0
    #   batch_type: 0
    #   start: 25
    #   amount: 25
    form = {
        'otype': 2,
        'city': 0,
        'cate': 0,
        'batch_type': 0,
        'start': 25,
        'amount': 25
    }
    spider.send_request(form=form)
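Note that the form starts at 'start': 25, so the crawl begins with the second page of 25 records and the first 25 ranks are never fetched. If the first page is wanted as well (my reading of the pagination, not something the original states), start from 0 instead:

form['start'] = 0  # begin at the first page; send_request then advances by 25 per request
spider.send_request(form=form)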
Reposted from: https://www.jianshu.com/p/179815985d7f