# scJamlon.py — scrapes book names and prices from jamalon.com listing pages
# and writes the results to jamlon.json (and a CSV file).
import requests
import urllib.request
import time
from bs4 import BeautifulSoup
import json
import csv

# Base listing URL; the page number is appended as the "p" query value.
URL = 'https://jamalon.com/en/books?p='


def parse_books(html):
    """Extract book records from one jamalon.com listing page.

    Parameters:
        html: the raw page body (bytes or str) to feed to BeautifulSoup.

    Returns:
        A list of {'name': ..., 'price': ...} dicts. Cards without an
        <h4> name are skipped; a card without a price <span class="num">
        gets an empty-string price instead of crashing the scrape
        (the original did ``price.text`` unconditionally and would raise
        AttributeError on such a card).
    """
    soup = BeautifulSoup(html, "html.parser")
    cards = soup.find_all('div', {'class': "col-sm-4 col-md-4 col-xs-12"})
    records = []
    for card in cards:
        name = card.find('h4')
        if not name:
            continue
        price = card.find('span', {'class': 'num'})
        records.append({
            'name': name.text,
            'price': price.text if price else '',
        })
    return records


def main():
    """Scrape 200 listing pages, then write JSON and CSV outputs."""
    books = []
    for page in range(200):
        print('---', page, '---')
        print(URL + str(page))
        # Timeout so one hung request cannot stall the whole scrape.
        r = requests.get(URL + str(page), timeout=30)
        books.extend(parse_books(r.content))

    # json.dump emits one valid JSON array in a single call. The original
    # hand-built the file and left a trailing comma before the closing
    # bracket, producing invalid JSON.
    with open('jamlon.json', 'w', encoding='utf8') as f:
        json.dump(books, f, ensure_ascii=False, indent=2)

    # The original opened this CSV file (and imported csv) but never wrote
    # a single row to it; actually emit the scraped data.
    with open('SouqDataapple.csv', 'w', encoding='utf8', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['name', 'price'])
        writer.writeheader()
        writer.writerows(books)


if __name__ == '__main__':
    main()