Sogou & 360
Scrape images from web pages and pick out the image categories you need.
Scraping Baidu images
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import re
import json
import socket
import time
import urllib.request
import urllib.parse
import urllib.error

# Set a global socket timeout
timeout = 5
socket.setdefaulttimeout(timeout)


class Crawler:
    # Pause between downloads (seconds)
    __time_sleep = 0.1
    __amount = 0
    __start_amount = 0
    __counter = 0
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}

    # t: interval between image downloads
    def __init__(self, t=0.1):
        self.time_sleep = t

    # Save the images from one page of results
    def __save_image(self, rsp_data, word):
        if not os.path.exists("./" + word):
            os.mkdir("./" + word)
        # Count existing files so new names don't clash
        self.__counter = len(os.listdir('./' + word)) + 1
        for image_info in rsp_data['imgs']:
            try:
                time.sleep(self.time_sleep)
                fix = self.__get_suffix(image_info['objURL'])
                urllib.request.urlretrieve(image_info['objURL'], './' + word + '/' + str(self.__counter) + str(fix))
            except urllib.error.HTTPError as urllib_err:
                print(urllib_err)
                continue
            except Exception as err:
                time.sleep(1)
                print(err)
                print("Unknown error, skipping this image")
                continue
            else:
                print(word + " +1, " + str(self.__counter) + " images of " + word + " so far")
                self.__counter += 1
        return

    # Get the file extension
    @staticmethod
    def __get_suffix(name):
        m = re.search(r'\.[^\.]*$', name)
        if m and len(m.group(0)) <= 5:
            return m.group(0)
        else:
            return '.jpeg'

    # Get the prefix (everything before the first dot)
    @staticmethod
    def __get_prefix(name):
        return name[:name.find('.')]

    # Start fetching
    def __get_images(self, word='机场'):
        search = urllib.parse.quote(word)
        # pn: result offset, stepped by 60 per page
        pn = self.__start_amount
        while pn < self.__amount:
            url = 'http://image.baidu.com/search/avatarjson?tn=resultjsonavatarnew&ie=utf-8&word=' + search + '&cg=girl&pn=' + str(
                pn) + '&rn=60&itg=0&z=0&fr=&width=&height=&lm=-1&ic=0&s=0&st=-1&gsm=1e0000001e'
            # Send headers to avoid getting banned
            page = None
            try:
                time.sleep(self.time_sleep)
                req = urllib.request.Request(url=url, headers=self.headers)
                page = urllib.request.urlopen(req)
                rsp = page.read().decode('unicode_escape')
            except UnicodeDecodeError as e:
                print(e)
                print('-----UnicodeDecodeError url:', url)
            except urllib.error.URLError as e:
                print(e)
                print("-----urlError url:", url)
            except socket.timeout as e:
                print(e)
                print("-----socket timeout:", url)
            else:
                # Parse the JSON response and save the images
                rsp_data = json.loads(rsp)
                self.__save_image(rsp_data, word)
                # Move on to the next page
                print("Downloading next page")
                pn += 60
            finally:
                if page is not None:
                    page.close()
        print("Download task finished")
        return

    def start(self, word, spider_page_num=1, start_page=1):
        """
        Crawler entry point
        :param word: keyword to search for
        :param spider_page_num: number of pages to fetch; total images = pages x 60
        :param start_page: page to start from
        :return:
        """
        self.__start_amount = (start_page - 1) * 60
        self.__amount = spider_page_num * 60 + self.__start_amount
        self.__get_images(word)


if __name__ == '__main__':
    crawler = Crawler(0.05)
    crawler.start('云彩', 2)  # change the search keyword here
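The fetch-and-decode step is the fragile part: Baidu's avatarjson endpoint is old and may no longer respond exactly as shown. As a hedged sketch only, here is the same single-page fetch written with requests (fetch_page is a made-up helper name; it assumes the endpoint still returns escape-encoded JSON with an 'imgs' list, as the class above expects):

import json
import requests
import urllib.parse

def fetch_page(word, pn, rn=60, timeout=5):
    # Build the same avatarjson URL the Crawler class uses
    url = ('http://image.baidu.com/search/avatarjson?tn=resultjsonavatarnew'
           '&ie=utf-8&word=' + urllib.parse.quote(word) +
           '&pn=' + str(pn) + '&rn=' + str(rn))
    headers = {'User-Agent': 'Mozilla/5.0'}
    rsp = requests.get(url, headers=headers, timeout=timeout)
    # The endpoint returns escape-encoded JSON, hence unicode_escape
    return json.loads(rsp.content.decode('unicode_escape'))

# data = fetch_page('云彩', 0)
# print(len(data['imgs']))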
Scraping Sogou images
# -*- coding:GBK -*-
import requests
import json
import urllib.request


def getSogouImag(category, length, path):
    # Note: the keyword is hard-coded into the URL below (GBK percent-encoded),
    # so `category` only labels the run; `length` caps how far we page through
    # the result offsets (48 results per request).
    n = length
    num = 0
    m = 0
    while num < n:
        num = num + 48
        print(num)
        imgs = requests.get('http://pic.sogou.com/pics?query=%D4%C6%B2%CA&mode=1&start=' + str(num) + '&reqType=ajax&reqFrom=result&tn=0')
        jd = json.loads(imgs.text)
        jd = jd['items']
        imgs_url = []
        for j in jd:
            imgs_url.append(j['thumbUrl'])
        for img_url in imgs_url:
            print('***** ' + str(m) + '.jpg ***** Downloading...')
            urllib.request.urlretrieve(img_url, path + str(m) + '.jpg')
            m = m + 1
    print('Download complete!')


getSogouImag('壁纸', 2000, 'C:/Users/Administrator/Desktop/云彩/')
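The hard-coded query=%D4%C6%B2%CA above is simply the GBK percent-encoding of the keyword 云彩. A minimal sketch (sogou_query_url is a hypothetical helper, built only on Python 3's urllib.parse) of how to produce that parameter for any keyword, so the category argument could actually drive the search:

import urllib.parse

def sogou_query_url(keyword, start):
    # Percent-encode the keyword in GBK, as pic.sogou.com expects
    q = urllib.parse.quote(keyword, encoding='gbk')
    return ('http://pic.sogou.com/pics?query=' + q +
            '&mode=1&start=' + str(start) + '&reqType=ajax&reqFrom=result&tn=0')

print(sogou_query_url('云彩', 48))  # query=%D4%C6%B2%CA, matching the hard-coded URL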
Scraping 360 images
from retrying import retry
import requests
import urllib.request
import json
import time


# The Img class wraps the whole crawl
class Img:
    def __init__(self):  # set up headers and an image counter
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36",
            "referer": "http://s.360.cn/0kee/a.html",
            "Connection": "keep-alive"}
        self.num = 0

    def get_img_list(self, url):  # fetch the list of image URLs for one page
        response = requests.get(url, headers=self.headers)
        html_str = response.content.decode()
        json_str = json.loads(html_str)
        img_str_list = json_str["list"]
        img_list = []
        for img_object in img_str_list:
            img_list.append(img_object["qhimg_url"])
        return img_list

    def save_img_list(self, img_list):
        for img in img_list:
            self.save_img(img)

    # If saving an image raises, retry it; stop_max_attempt_number caps the attempts
    @retry(stop_max_attempt_number=3)
    def save_img(self, img):  # download one image URL and save it locally
        with open("D:\\Temp\\imges\\" + str(self.num) + ".jpg", "wb") as f:
            f.write(urllib.request.urlopen(img).read())
        # time.sleep(10)
        print(str(self.num) + " saved")
        self.num += 1

    def run(self):  # main loop
        total = 1500
        # The {} in the URL takes the offset; each JSON page holds 30 items
        self.temp_url = "http://image.so.com/zj?ch=beauty&sn={}&listtype=new&temp=1"
        while self.num <= total:
            # 1. Build the URL for the current offset
            url = self.temp_url.format(self.num)
            # 2. Fetch the data
            img_list = self.get_img_list(url)
            # 3. Save it
            self.save_img_list(img_list)
            # Don't hit the server too often
            # time.sleep(60)
            print("Taking a short break")


if __name__ == '__main__':
    img = Img()
    img.run()
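A quick way to see what @retry(stop_max_attempt_number=3) buys you: the decorated function is re-invoked on any exception, up to three attempts in total, and the last exception propagates if every attempt fails. A minimal standalone sketch (flaky is a made-up function for illustration):

from retrying import retry

attempts = {'n': 0}

@retry(stop_max_attempt_number=3)
def flaky():
    # Fails on the first two calls, succeeds on the third
    attempts['n'] += 1
    if attempts['n'] < 3:
        raise IOError("transient failure %d" % attempts['n'])
    return "ok after %d attempts" % attempts['n']

print(flaky())  # ok after 3 attempts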