自动输入验证码
爬虫--实现图片验证码全自动输入
爬取网站:豆瓣(https://accounts.douban.com/login)
爬虫思路:1. 使用selenium, 对图片验证码进行截图操作,
2. 接入打码平台--云打码,传输图片,返回验证码
一. 爬虫代码如下:
import time
from selenium import webdriver
from PIL import Image
from io import BytesIO # 以IO的形式转换为二进制
import YDM_test
class DouBan(object):
    """Automate Douban login, solving the image captcha via the YunDaMa API."""

    def __init__(self):
        # Login page to drive.
        self.url = 'https://accounts.douban.com/login'
        # NOTE(review): headers are defined but never used — selenium's
        # webdriver does not accept request headers; confirm before removing.
        self.headers = {'User-Agent': 'Mozilla/5.0 (windows NT 10.0; WOW64) APPleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'}

    def run(self):
        """Open the login page, fill credentials, solve the captcha, log in."""
        # Launch Chrome and load the login page.
        browser = webdriver.Chrome('/home/Python/Desktop/chromedriver')
        browser.get(self.url)

        # Fill in account and password.
        time.sleep(1)
        browser.find_element_by_id('email').send_keys('[email protected]')
        browser.find_element_by_id('password').send_keys('your_pwd')

        # Grab a full-page screenshot as PNG bytes and load it with PIL.
        png_bytes = browser.get_screenshot_as_png()
        full_image = Image.open(BytesIO(png_bytes))
        full_image.save('00_screen_image.png')

        # Locate the captcha <img> element and compute its bounding box
        # (left, top, right, bottom) within the screenshot.
        captcha_el = browser.find_element_by_id('captcha_image')
        left = captcha_el.location['x']
        top = captcha_el.location['y']
        right = left + captcha_el.size['width']
        bottom = top + captcha_el.size['height']
        box = (left, top, right, bottom)
        print(box)

        # Crop the captcha region out of the screenshot and save it.
        captcha_image = full_image.crop(box)
        captcha_image.save('00_cut_image.png')

        # Send the cropped image to the YunDaMa platform and type the answer.
        answer = YDM_test.image_decode('00_cut_image.png')
        browser.find_element_by_id('captcha_field').send_keys(answer)

        # Click login, wait for the result page, then close the browser.
        browser.find_element_by_name('login').click()
        time.sleep(3)
        browser.quit()
if __name__ == '__main__':
    # Script entry point: run the full login flow once.
    DouBan().run()
二. 云打码SDK接口文件 --> YDM_test.py
import http.client, mimetypes, urllib, json, time, requests
######################################################################
class YDMHttp:
    """Minimal client for the YunDaMa (yundama.com) captcha-recognition API.

    Every call POSTs a method name plus the account credentials to ``apiurl``
    and parses the JSON reply.  Negative return values are platform error
    codes; -9001 means "empty/no response".
    """

    apiurl = 'http://api.yundama.com/api.php'
    username = ''
    password = ''
    appid = ''
    appkey = ''

    def __init__(self, username, password, appid, appkey):
        self.username = username
        self.password = password
        # The API expects the software id as a string.
        self.appid = str(appid)
        self.appkey = appkey

    def _credentials(self, method):
        # Base form fields shared by every API call.
        return {'method': method, 'username': self.username,
                'password': self.password, 'appid': self.appid,
                'appkey': self.appkey}

    def _extract(self, response, key):
        """Return ``response[key]`` on success, the negative 'ret' error code
        on an API error, or -9001 when the response is empty/missing."""
        if not response:
            return -9001
        ret = response.get('ret')
        if ret and ret < 0:  # ret == 0 (or absent) means success
            return ret
        return response[key]

    def request(self, fields, files=None):
        """POST *fields* (and optional files) to the API; return parsed JSON."""
        response = self.post_url(self.apiurl, fields, files)
        return json.loads(response)

    def balance(self):
        """Return the remaining account balance, or a negative error code."""
        response = self.request(self._credentials('balance'))
        return self._extract(response, 'balance')

    def login(self):
        """Log in and return the user id, or a negative error code."""
        response = self.request(self._credentials('login'))
        return self._extract(response, 'uid')

    def upload(self, filename, codetype, timeout):
        """Upload a captcha image; return its cid, or a negative error code."""
        data = self._credentials('upload')
        data['codetype'] = str(codetype)
        data['timeout'] = str(timeout)
        file = {'file': filename}
        response = self.request(data, file)
        return self._extract(response, 'cid')

    def result(self, cid):
        """Return the recognized text for *cid*, or '' when not ready yet."""
        data = self._credentials('result')
        data['cid'] = str(cid)
        response = self.request(data)
        return response.get('text', '') if response else ''

    def decode(self, filename, codetype, timeout):
        """Upload *filename*, then poll once per second up to *timeout* s.

        Returns (cid, text) on success, (cid, '') when the upload failed,
        or (-3003, '') when polling timed out.
        """
        cid = self.upload(filename, codetype, timeout)
        if cid <= 0:
            return cid, ''
        for _ in range(timeout):
            text = self.result(cid)
            if text != '':
                return cid, text
            time.sleep(1)
        return -3003, ''

    def report(self, cid):
        """Report *cid* as wrongly recognized; return the API's 'ret' code."""
        data = self._credentials('report')
        data['cid'] = str(cid)
        data['flag'] = '0'
        response = self.request(data)
        return response['ret'] if response else -9001

    def post_url(self, url, fields, files=None):
        """POST *fields* with optional ``{form_field: file_path}`` *files*.

        Fixes the original's mutable default argument (``files=[]``) and its
        file-handle leak: handles are opened locally, never written back into
        the caller's dict, and always closed.
        """
        handles = {key: open(path, 'rb') for key, path in (files or {}).items()}
        try:
            res = requests.post(url, files=handles, data=fields)
            return res.text
        finally:
            for fh in handles.values():
                fh.close()
######################################################################
# Registered YunDaMa account username.
username = '注册的用户账号'
# Account password.
password = '账号密码'
# Software ID, a required developer revenue-share parameter; obtained from the
# developer console under "My Software".
appid = 4999 # id of the software created on the platform
# Software key, a required developer revenue-share parameter; obtained from the
# developer console under "My Software".
appkey = '需要在云打码平台中创建获取'
# Polling timeout, in seconds.
timeout = 30
# Convenience wrapper around the YDMHttp client -- image_decode()
def image_decode(filename, codetype=3000):
    """Solve the captcha image at *filename* via YunDaMa and return its text.

    filename -- path of the captcha image to upload.
    codetype -- YunDaMa captcha-type id (3000 here; verify against the
                platform's current type table).

    Returns the recognized text, or '' when credentials are not configured
    (was an implicit None, which would break callers expecting a string).
    """
    # NOTE(review): this guard compares against the literal 'username', not
    # the shipped placeholder '注册的用户账号', so it never fires with the
    # defaults above -- confirm the intended sentinel value.
    if username == 'username':
        print('请设置好相关参数再测试')
        return ''
    # Initialize the client with the module-level credentials.
    yundama = YDMHttp(username, password, appid, appkey)
    # Log in to YunDaMa.
    uid = yundama.login()
    print('uid: %s' % uid)
    # Check the remaining balance.
    balance = yundama.balance()
    print('balance: %s' % balance)
    # Upload and poll: image path, captcha-type id, timeout (s) -> result.
    cid, result = yundama.decode(filename, codetype, timeout)
    print('cid: %s, result: %s' % (cid, result))
    return result
######################################################################
if __name__ == '__main__':
    # Manual smoke test: decode the captcha crop saved by the scraper.
    image_decode('00_cut_image.png')
------------------END-------------------------
相关阅读
为什么我们要爬取数据 在大数据时代,我们要获取更多数据,就要进行数据的挖掘、分析、筛选,比如当我们做一个项目的时候,需要大量
网络爬虫是SEO人员应该学习的基础知识之一,认识和理解网络爬虫有助于更好地优化网站。今天,SEO百科网带来的是《网络爬虫的分类和策
百度是中国目前的第一大搜索引擎,拥有完善的一套爬虫算法,了解百度蜘蛛的爬虫原理,对我们SEO优化工作有着举足轻重的作用。我们可以
Github Clone项目、创建分支 下载Github for Windows桌面客户端 把master主分支clone到本地先把master主分支clone到本地 客户
目标 网络爬虫的是做什么的? 手动写一个简单的网络爬虫; 1. 网络爬虫 1.1. 名称 网络爬虫(又被称为网页蜘蛛,网络机器人),是一种