# image_reptile.py
## ------------ 1. Crawl Baidu Images --------------##
# -*- coding: utf-8 -*-
import requests
import os


def getManyPages(keyword, pages):
    # Build one query-parameter dict per result page; 'pn' is the result
    # offset and 'rn' the page size, so results are fetched 30 at a time.
    params = []
    for i in range(30, 30 * pages + 30, 30):
        params.append({
            'tn': 'resultjson_com',
            'ipn': 'rj',
            'ct': 201326592,
            'is': '',
            'fp': 'result',
            'queryWord': keyword,
            'cl': 2,
            'lm': -1,
            'ie': 'utf-8',
            'oe': 'utf-8',
            'adpicid': '',
            'st': -1,
            'z': '',
            'ic': 0,
            'word': keyword,
            's': '',
            'se': '',
            'tab': '',
            'width': '',
            'height': '',
            'face': 0,
            'istype': 2,
            'qc': '',
            'nc': 1,
            'fr': '',
            'pn': i,
            'rn': 30,
            'gsm': '1e',
            '1488942260214': ''
        })
    # Baidu image-search JSON endpoint
    url = 'https://image.baidu.com/search/acjson'
    urls = []
    for param in params:
        urls.append(requests.get(url, params=param).json().get('data'))
    return urls
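
# Worked example of the paging above (not in the original script): with
# pages=2 the loop range(30, 30*2+30, 30) yields pn offsets 30 and 60, so
# getManyPages('cat', 2) issues two requests of 30 results each. Note the
# offsets start at 30, so the first 30 results are never requested.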
def getImg(dataList, localPath):
    if not os.path.exists(localPath):  # create the output folder if needed
        os.mkdir(localPath)
    x = 0
    for page in dataList:  # renamed from 'list' to avoid shadowing the builtin
        for item in page:
            if item.get('thumbURL') is not None:
                print('downloading...%s' % item.get('thumbURL'))
                ir = requests.get(item.get('thumbURL'))
                # Write the image bytes; the context manager closes the file
                with open(localPath + '%d.jpg' % (x + 1000), 'wb') as fp:
                    fp.write(ir.content)
                x += 1
            else:
                print('Image link does not exist...')
if __name__ == '__main__':
    dataList = getManyPages('包', 30)  # arg 1: keyword ('包' = "bag"); arg 2: number of pages to fetch
    getImg(dataList, './包/')          # arg 2: directory to save into
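
# A minimal hedged sketch (not part of the original script): the same page
# fetch with a request timeout and a guard against non-JSON replies, since
# the acjson endpoint can return errors or block automated clients. The
# helper name fetchPageSafe and the 10-second timeout are illustrative
# assumptions, not part of the original code.
def fetchPageSafe(url, param):
    try:
        rsp = requests.get(url, params=param, timeout=10)
        rsp.raise_for_status()
        return rsp.json().get('data')
    except (requests.RequestException, ValueError) as err:
        print('page fetch failed: %s' % err)
        return []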

## ------------ 2. Crawl Baidu Images --------------##
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import re
import json
import socket
import time
import urllib.request
import urllib.parse
import urllib.error

# Global socket timeout, in seconds
timeout = 5
socket.setdefaulttimeout(timeout)


class Crawler:
    # Delay between requests, in seconds
    __time_sleep = 0.1
    __amount = 0
    __start_amount = 0
    __counter = 0
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}

    # Fetches image URLs and downloads the images.
    # t: delay between image downloads, in seconds
    def __init__(self, t=0.1):
        self.time_sleep = t
    # Save the images from one response page
    def __save_image(self, rsp_data, word):
        if not os.path.exists("./" + word):
            os.mkdir("./" + word)
        # Continue numbering after any files already in the folder, so
        # repeated runs do not overwrite earlier downloads
        self.__counter = len(os.listdir('./' + word)) + 1
        for image_info in rsp_data['imgs']:
            try:
                time.sleep(self.time_sleep)
                fix = self.__get_suffix(image_info['objURL'])
                urllib.request.urlretrieve(image_info['objURL'], './' + word + '/' + str(self.__counter) + str(fix))
            except urllib.error.HTTPError as urllib_err:
                print(urllib_err)
                continue
            except Exception as err:
                time.sleep(1)
                print(err)
                print("Unknown error, skipping this image")
                continue
            else:
                print("Image +1, downloaded " + str(self.__counter) + " images so far")
                self.__counter += 1
        return
    # Extract the file extension; fall back to .jpeg when the URL has no
    # usable suffix (the original called m.group(0) before checking for a
    # match, which raised AttributeError on suffix-less URLs)
    @staticmethod
    def __get_suffix(name):
        m = re.search(r'\.[^\.]*$', name)
        if m and len(m.group(0)) <= 5:
            return m.group(0)
        return '.jpeg'

    # Extract the part of the name before the first dot
    @staticmethod
    def __get_prefix(name):
        return name[:name.find('.')]
    # Fetch result pages and hand each JSON response to __save_image
    def __get_images(self, word='美女'):
        search = urllib.parse.quote(word)
        # pn: result offset into the search results
        pn = self.__start_amount
        while pn < self.__amount:
            url = 'http://image.baidu.com/search/avatarjson?tn=resultjsonavatarnew&ie=utf-8&word=' + search + '&cg=girl&pn=' + str(
                pn) + '&rn=60&itg=0&z=0&fr=&width=&height=&lm=-1&ic=0&s=0&st=-1&gsm=1e0000001e'
            page = None
            # Send a browser User-Agent header to avoid being banned
            try:
                time.sleep(self.time_sleep)
                req = urllib.request.Request(url=url, headers=self.headers)
                page = urllib.request.urlopen(req)
                rsp = page.read().decode('unicode_escape')
            except UnicodeDecodeError as e:
                print(e)
                print('-----UnicodeDecodeError url:', url)
            except urllib.error.URLError as e:
                print(e)
                print("-----URLError url:", url)
            except socket.timeout as e:
                print(e)
                print("-----socket timeout:", url)
            else:
                # Parse the JSON and save this page's images
                rsp_data = json.loads(rsp)
                self.__save_image(rsp_data, word)
                # Move on to the next page
                print("Downloading next page")
                pn += 60
            finally:
                # Guard against 'page' being unset when urlopen itself failed
                if page is not None:
                    page.close()
        print("Download finished")
        return
    def start(self, word, spider_page_num=5, start_page=1):
        """
        Crawler entry point.
        :param word: keyword to crawl
        :param spider_page_num: number of pages to crawl; total images fetched is pages x 60
        :param start_page: page to start from
        :return:
        """
        self.__start_amount = (start_page - 1) * 60
        self.__amount = spider_page_num * 60 + self.__start_amount
        self.__get_images(word)
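
# Worked example of the paging arithmetic in start() (not in the original
# script): start('nike logo', 3, 3) gives __start_amount = (3 - 1) * 60 = 120
# and __amount = 3 * 60 + 120 = 300, so __get_images requests offsets
# pn = 120, 180 and 240, i.e. three pages of 60 results each.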
if __name__ == '__main__':
    crawler = Crawler(0.05)
    crawler.start('nike logo', 3, 3)

## ------------ 3. Crawl Bing Images --------------##
## supports Python 3
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import urllib.error
import urllib.parse
import urllib.request
from bs4 import BeautifulSoup


# Download an image from a thumbnail link found on the page and save it
def SaveImage(link, InputData, count):
    try:
        time.sleep(0.2)
        urllib.request.urlretrieve(link, './' + InputData + '/' + str(count) + '.jpg')
    except urllib.error.HTTPError as urllib_err:
        print(urllib_err)
    except Exception as err:
        time.sleep(1)
        print(err)
        print("Unknown error, skipping this image")
    else:
        print("Image +1, " + str(count) + " images so far")

# Find the image links on each result page
def FindLink(PageNum, InputData, word):
    for i in range(PageNum):
        print(i)
        try:
            # Bing's async image endpoint: 'first' is the 1-based index of
            # the first result on the page and 'count' the page size
            url = 'http://cn.bing.com/images/async?q={0}&first={1}&count=35&relp=35&lostate=r&mmasync=1&dgState=x*175_y*848_h*199_c*1_i*106_r*0'
            agent = {'User-Agent': "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.165063 Safari/537.36 AppEngine-Google."}
            page1 = urllib.request.Request(url.format(InputData, i * 35 + 1), headers=agent)
            page = urllib.request.urlopen(page1)
            soup = BeautifulSoup(page.read(), 'html.parser')
            if not os.path.exists("./" + word):
                os.mkdir('./' + word)
            # Thumbnails carry the CSS class 'mimg'; their src attribute is
            # the image URL
            for StepOne in soup.select('.mimg'):
                link = StepOne.attrs['src']
                count = len(os.listdir('./' + word)) + 1
                SaveImage(link, word, count)
        except Exception as err:
            print('URL OPENING ERROR!', err)
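
# Worked example of the paging above (not in the original script): with
# PageNum=2 the loop requests first=1 and then first=36, so each iteration
# asks Bing for the next block of 35 thumbnails.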
if __name__ == '__main__':
    # Number of pages to load; each page holds 35 images
    PageNum = 50
    # Keyword to search for ('餐桌' = "dining table")
    word = '餐桌'
    InputData = urllib.parse.quote(word)
    FindLink(PageNum, InputData, word)