from __future__ import unicode_literals, print_function

import os
import re
import threadpool
import requests

from bs4 import BeautifulSoup
from tabulate import tabulate

import nhentai.constant as constant
from nhentai.logger import logger
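

# A single module-level Session is shared by every request so that login
# cookies persist across calls and all traffic carries the same spoofed
# browser User-Agent and Referer headers.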
session = requests.Session()
session.headers.update({
    'Referer': constant.LOGIN_URL,
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36',
})
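

# Thin dispatcher around the shared session: `method` names a Session method
# ('get', 'post', ...) and the configured proxy is applied on every call.
# Any extra requests keyword passes straight through, so for example
# request('get', url, timeout=10) behaves like
# session.get(url, proxies=constant.PROXY, verify=False, timeout=10).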
def request(method, url, **kwargs):
    if not hasattr(session, method):
        raise AttributeError('\'requests.Session\' object has no attribute \'{0}\''.format(method))

    return getattr(session, method)(url, proxies=constant.PROXY, verify=False, **kwargs)
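

# The login form is Django-style: the CSRF token is embedded in the page as a
# hidden <input name="csrfmiddlewaretoken"> and must be echoed back together
# with the POSTed credentials.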
def _get_csrf_token(content):
    html = BeautifulSoup(content, 'html.parser')
    csrf_token_elem = html.find('input', attrs={'name': 'csrfmiddlewaretoken'})
    if not csrf_token_elem:
        raise Exception('Cannot find csrf token to login')
    return csrf_token_elem.attrs['value']
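

# Log in by fetching a CSRF token from the login page, POSTing it back with
# the credentials, and re-submitting once if the "loading pages way too
# quickly" anti-bot interstitial appears.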
def login(username, password):
    if os.getenv('DEBUG'):
        logger.info('Getting CSRF token ...')

    csrf_token = _get_csrf_token(request('get', url=constant.LOGIN_URL).text)

    if os.getenv('DEBUG'):
        logger.info('CSRF token is {}'.format(csrf_token))

    login_dict = {
        'csrfmiddlewaretoken': csrf_token,
        'username_or_email': username,
        'password': password,
    }
    resp = request('post', url=constant.LOGIN_URL, data=login_dict)

    if 'You\'re loading pages way too quickly.' in resp.text:
        # The anti-bot page carries a fresh token; submit it to pass through.
        csrf_token = _get_csrf_token(resp.text)
        resp = request('post', url=resp.url, data={'csrfmiddlewaretoken': csrf_token, 'next': '/'})

    if 'Invalid username/email or password' in resp.text:
        logger.error('Login failed, please check your username and password')
        exit(1)

    if 'You\'re loading pages way too quickly.' in resp.text:
        logger.error('Cannot pass the anti-bot challenge, please file an issue'
                     ' at https://github.com/RicterZ/nhentai/issues')
        exit(2)
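

# Scrape the logged-in user's favorites. nhentai lists 25 favorites per page,
# so the page count is effectively ceil(count / 25): e.g. 60 favorites gives
# int(60 / 25) = 2 full pages plus one more for the 10 left over, i.e. 3 pages.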
def favorites_parser():
    html = BeautifulSoup(request('get', constant.FAV_URL).content, 'html.parser')
    count = html.find('span', attrs={'class': 'count'})
    if not count:
        logger.error("Can't get your number of favorited doujins. Did the login fail?")
        return []

    count = int(count.text.strip('(').strip(')').replace(',', ''))
    if count == 0:
        logger.warning('No favorites found')
        return []

    pages = int(count / 25)
    if pages:
        pages += 1 if count % (25 * pages) else 0
    else:
        pages = 1

    logger.info('You have %d favorites in %d pages.' % (count, pages))
    if os.getenv('DEBUG'):
        pages = 1

    ret = []
    doujinshi_id = re.compile(r'data-id="([\d]+)"')

    def _callback(request, result):
        ret.append(result)

    thread_pool = threadpool.ThreadPool(5)  # parse favorites concurrently
    for page in range(1, pages + 1):
        try:
            logger.info('Getting doujinshi ids of page %d' % page)
            resp = request('get', constant.FAV_URL + '?page=%d' % page).text
            ids = doujinshi_id.findall(resp)
            requests_ = threadpool.makeRequests(doujinshi_parser, ids, _callback)
            for req in requests_:
                thread_pool.putRequest(req)
            thread_pool.wait()
        except Exception as e:
            logger.error('Error: %s, continue', str(e))

    return ret
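

# Scrape a gallery's detail page into a plain dict: name/subtitle, cover image
# id, per-page image extensions, page count, and the metadata fields listed in
# `needed_fields` below.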
def doujinshi_parser(id_):
    if not isinstance(id_, int) and not (isinstance(id_, str) and id_.isdigit()):
        raise Exception('Doujinshi id({0}) is not valid'.format(id_))

    id_ = int(id_)
    logger.log(15, 'Fetching doujinshi information of id {0}'.format(id_))
    doujinshi = dict()
    doujinshi['id'] = id_
    url = '{0}/{1}/'.format(constant.DETAIL_URL, id_)

    try:
        response = request('get', url).content
    except Exception as e:
        logger.critical(str(e))
        raise SystemExit

    html = BeautifulSoup(response, 'html.parser')
    doujinshi_info = html.find('div', attrs={'id': 'info'})

    title = doujinshi_info.find('h1').text
    subtitle = doujinshi_info.find('h2')

    doujinshi['name'] = title
    doujinshi['subtitle'] = subtitle.text if subtitle else ''

    doujinshi_cover = html.find('div', attrs={'id': 'cover'})
    img_id = re.search(r'/galleries/([\d]+)/cover\.(jpg|png)$', doujinshi_cover.a.img.attrs['data-src'])

    # Record each page's file extension from its thumbnail URL.
    ext = []
    for i in html.find_all('div', attrs={'class': 'thumb-container'}):
        _, ext_name = os.path.basename(i.img.attrs['data-src']).rsplit('.', 1)
        ext.append(ext_name)

    if not img_id:
        logger.critical('Failed to get the image id of doujinshi {0}'.format(id_))
        exit(1)

    doujinshi['img_id'] = img_id.group(1)
    doujinshi['ext'] = ext

    pages = 0
    for _ in doujinshi_info.find_all('div', class_=''):
        pages = re.search(r'([\d]+) pages', _.text)
        if pages:
            pages = pages.group(1)
            break
    doujinshi['pages'] = int(pages)

    # Collect the metadata fields of the doujinshi.
    information_fields = doujinshi_info.find_all('div', attrs={'class': 'field-name'})
    needed_fields = ['Characters', 'Artists', 'Language', 'Tags']
    for field in information_fields:
        field_name = field.contents[0].strip().strip(':')
        if field_name in needed_fields:
            data = [sub_field.contents[0].strip() for sub_field in
                    field.find_all('a', attrs={'class': 'tag'})]
            doujinshi[field_name.lower()] = ', '.join(data)

    return doujinshi
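

# Scrape one page of HTML search results into [{'id': ..., 'title': ...}, ...],
# truncating long titles so the tabulated output stays readable, e.g.
# print_doujinshi(search_parser('keyword', page=1)).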
def search_parser(keyword, page):
    logger.debug('Searching doujinshis of keyword {0}'.format(keyword))
    result = []
    try:
        response = request('get', url=constant.SEARCH_URL, params={'q': keyword, 'page': page}).content
    except requests.ConnectionError as e:
        logger.critical(e)
        logger.warning('If you are in China, you may need a proxy to get around the GFW.')
        raise SystemExit

    html = BeautifulSoup(response, 'html.parser')
    doujinshi_search_result = html.find_all('div', attrs={'class': 'gallery'})
    for doujinshi in doujinshi_search_result:
        doujinshi_container = doujinshi.find('div', attrs={'class': 'caption'})
        title = doujinshi_container.text.strip()
        title = title if len(title) < 85 else title[:82] + '...'
        id_ = re.search(r'/g/(\d+)/', doujinshi.a['href']).group(1)
        result.append({'id': id_, 'title': title})
    if not result:
        logger.warning('Nothing found for keyword {}'.format(keyword))

    return result
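

# JSON-API variant of doujinshi_parser, parked under the __api_suspended
# prefix; transient request failures are retried up to 5 times before
# giving up.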
def __api_suspended_doujinshi_parser(id_):
    if not isinstance(id_, int) and not (isinstance(id_, str) and id_.isdigit()):
        raise Exception('Doujinshi id({0}) is not valid'.format(id_))

    id_ = int(id_)
    logger.log(15, 'Fetching information of doujinshi id {0}'.format(id_))
    doujinshi = dict()
    doujinshi['id'] = id_
    url = '{0}/{1}'.format(constant.DETAIL_URL, id_)
    i = 0
    while i < 5:
        try:
            response = request('get', url).json()
        except Exception as e:
            i += 1
            if not i < 5:
                logger.critical(str(e))
                exit(1)
            continue
        break

    doujinshi['name'] = response['title']['english']
    doujinshi['subtitle'] = response['title']['japanese']
    doujinshi['img_id'] = response['media_id']
    # One type letter per page ('j' for jpg, 'p' for png), concatenated in page order.
    doujinshi['ext'] = ''.join(map(lambda s: s['t'], response['images']['pages']))
    doujinshi['pages'] = len(response['images']['pages'])

    # Collect the metadata fields of the doujinshi.
    needed_fields = ['character', 'artist', 'language', 'tag']
    for tag in response['tags']:
        tag_type = tag['type']
        if tag_type in needed_fields:
            if tag_type == 'tag':
                if tag_type not in doujinshi:
                    doujinshi[tag_type] = {}

                tag['name'] = tag['name'].replace(' ', '-')
                tag['name'] = tag['name'].lower()
                doujinshi[tag_type][tag['name']] = tag['id']
            elif tag_type not in doujinshi:
                doujinshi[tag_type] = tag['name']
            else:
                doujinshi[tag_type] += ', ' + tag['name']

    return doujinshi
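

# JSON-API variant of search_parser, likewise parked under the __api_suspended
# prefix; it retries transient failures up to 5 times like the parser above.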
def __api_suspended_search_parser(keyword, page):
    logger.debug('Searching doujinshis using keywords {0}'.format(keyword))
    result = []
    i = 0
    while i < 5:
        try:
            response = request('get', url=constant.SEARCH_URL, params={'query': keyword, 'page': page}).json()
        except Exception as e:
            i += 1
            if not i < 5:
                logger.critical(str(e))
                logger.warning('If you are in China, you may need a proxy to get around the GFW.')
                exit(1)
            continue
        break

    if 'result' not in response:
        raise Exception('No result in response')

    for row in response['result']:
        title = row['title']['english']
        title = title[:85] + '..' if len(title) > 85 else title
        result.append({'id': row['id'], 'title': title})
    if not result:
        logger.warning('No results for keywords {}'.format(keyword))
    return result
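

# Render search results as an id/title table. tabulate's 'rst' format draws
# reStructuredText-style grid lines under the header row.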
def print_doujinshi(doujinshi_list):
    if not doujinshi_list:
        return
    doujinshi_list = [(i['id'], i['title']) for i in doujinshi_list]
    headers = ['id', 'doujinshi']
    logger.info('Search Result\n' +
                tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))
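

# Tag search against the (suspended) JSON API: iterates up to whichever is
# smaller, max_page or the API's reported num_pages.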
def __api_suspended_tag_parser(tag_id, max_page=1):
    logger.info('Searching for doujinshi with tag id {0}'.format(tag_id))
    result = []
    response = request('get', url=constant.TAG_API_URL, params={'sort': 'popular', 'tag_id': tag_id}).json()
    page = max_page if max_page <= response['num_pages'] else int(response['num_pages'])

    for i in range(1, page + 1):
        logger.info('Getting page {} ...'.format(i))

        if page != 1:
            # NOTE: the page number is not passed through, so every iteration
            # re-fetches the first page of results.
            response = request('get', url=constant.TAG_API_URL,
                               params={'sort': 'popular', 'tag_id': tag_id}).json()

    for row in response['result']:
        title = row['title']['english']
        title = title[:85] + '..' if len(title) > 85 else title
        result.append({'id': row['id'], 'title': title})

    if not result:
        logger.warning('No results for tag id {}'.format(tag_id))

    return result
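

# HTML tag search: normalizes the tag name into its URL slug ('Full Color' ->
# 'full-color') and walks up to max_page listing pages.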
def tag_parser(tag_name, max_page=1):
    result = []
    tag_name = tag_name.lower()
    tag_name = tag_name.replace(' ', '-')

    for p in range(1, max_page + 1):
        logger.debug('Fetching page {0} for doujinshi with tag \'{1}\''.format(p, tag_name))
        response = request('get', url='%s/%s?page=%d' % (constant.TAG_URL, tag_name, p)).content

        html = BeautifulSoup(response, 'html.parser')
        doujinshi_items = html.find_all('div', attrs={'class': 'gallery'})
        if not doujinshi_items:
            logger.error('Cannot find any doujinshi for tag \'{0}\''.format(tag_name))
            return result

        for i in doujinshi_items:
            # hrefs look like '/g/123456/'; stripping the characters '/' and 'g'
            # from both ends leaves the numeric id.
            doujinshi_id = i.a.attrs['href'].strip('/g')
            doujinshi_title = i.a.text.strip()
            doujinshi_title = doujinshi_title if len(doujinshi_title) < 85 else doujinshi_title[:82] + '...'
            result.append({'title': doujinshi_title, 'id': doujinshi_id})

    if not result:
        logger.warning('No results for tag \'{}\''.format(tag_name))

    return result
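

# Quick manual smoke test: fetch one known gallery id and dump the parsed dict.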
if __name__ == '__main__':
    print(doujinshi_parser("32271"))