# coding: utf-8
from __future__ import unicode_literals, print_function

import os
import re
import time

import requests

from bs4 import BeautifulSoup
from tabulate import tabulate

import nhentai.constant as constant
from nhentai.utils import request
from nhentai.logger import logger
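

# Extract the Django CSRF token from the login page so the login POST below can include it.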
def _get_csrf_token(content):
    html = BeautifulSoup(content, 'html.parser')
    csrf_token_elem = html.find('input', attrs={'name': 'csrfmiddlewaretoken'})
    if not csrf_token_elem:
        raise Exception('Cannot find csrf token to login')
    return csrf_token_elem.attrs['value']
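

# Log in with username/password by replaying the HTML login form.
# Deprecated in favour of passing a session cookie via --cookie.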
def login(username, password):
    logger.warning('This feature is deprecated, please use --cookie to set your cookie.')
    if os.getenv('DEBUG'):
        logger.info('Getting CSRF token ...')
    csrf_token = _get_csrf_token(request('get', url=constant.LOGIN_URL).text)
    if os.getenv('DEBUG'):
        logger.info('CSRF token is {}'.format(csrf_token))

    login_dict = {
        'csrfmiddlewaretoken': csrf_token,
        'username_or_email': username,
        'password': password,
    }
    resp = request('post', url=constant.LOGIN_URL, data=login_dict)

    if 'You\'re loading pages way too quickly.' in resp.text or 'Really, slow down' in resp.text:
        csrf_token = _get_csrf_token(resp.text)
        resp = request('post', url=resp.url, data={'csrfmiddlewaretoken': csrf_token, 'next': '/'})

    if 'Invalid username/email or password' in resp.text:
        logger.error('Login failed, please check your username and password')
        exit(1)

    if 'You\'re loading pages way too quickly.' in resp.text or 'Really, slow down' in resp.text:
        logger.error('Use nhentai --cookie \'YOUR_COOKIE_HERE\' to set your cookie instead.')
        exit(2)
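

# Pull (id, title) pairs out of a gallery listing page (search results, tag pages, favorites).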
def _get_title_and_id(response):
    result = []
    html = BeautifulSoup(response, 'html.parser')
    doujinshi_search_result = html.find_all('div', attrs={'class': 'gallery'})
    for doujinshi in doujinshi_search_result:
        doujinshi_container = doujinshi.find('div', attrs={'class': 'caption'})
        title = doujinshi_container.text.strip()
        title = title if len(title) < 85 else title[:82] + '...'
        id_ = re.search(r'/g/(\d+)/', doujinshi.a['href']).group(1)
        result.append({'id': id_, 'title': title})

    return result
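

# Walk every page of the logged-in user's favorites (25 galleries per page) and collect ids/titles.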
def favorites_parser():
    result = []
    html = BeautifulSoup(request('get', constant.FAV_URL).content, 'html.parser')
    count = html.find('span', attrs={'class': 'count'})
    if not count:
        logger.error("Can't get the number of your favorited doujins. Did the login fail?")
        return []

    count = int(count.text.strip('(').strip(')').replace(',', ''))
    if count == 0:
        logger.warning('No favorites found')
        return []
    pages = int(count / 25)

    if pages:
        pages += 1 if count % (25 * pages) else 0
    else:
        pages = 1

    logger.info('You have %d favorites across %d pages.' % (count, pages))

    if os.getenv('DEBUG'):
        # only fetch the first page while debugging
        pages = 1

    for page in range(1, pages + 1):
        try:
            logger.info('Getting doujinshi ids of page %d' % page)
            resp = request('get', constant.FAV_URL + '?page=%d' % page).content
            result.extend(_get_title_and_id(resp))
        except Exception as e:
            logger.error('Error: %s, continue', str(e))

    return result
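

# Scrape a single gallery page and return its metadata as a dict
# (id, name, subtitle, img_id, ext, pages, characters, artists, languages, tags).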
def doujinshi_parser(id_):
    if not (isinstance(id_, int) or (isinstance(id_, str) and id_.isdigit())):
        raise Exception('Doujinshi id({0}) is not valid'.format(id_))

    id_ = int(id_)
    logger.log(15, 'Fetching doujinshi information of id {0}'.format(id_))
    doujinshi = dict()
    doujinshi['id'] = id_
    url = '{0}/{1}/'.format(constant.DETAIL_URL, id_)

    try:
        response = request('get', url)
        if response.status_code in (200, ):
            response = response.content
        else:
            logger.debug('Slow down and retry ({}) ...'.format(id_))
            time.sleep(1)
            return doujinshi_parser(str(id_))
    except Exception as e:
        logger.critical(str(e))
        raise SystemExit
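
    # Parse the gallery HTML: title/subtitle, the gallery's media id taken from the
    # cover image URL, the file extension of every page thumbnail, and the page count.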
    html = BeautifulSoup(response, 'html.parser')
    doujinshi_info = html.find('div', attrs={'id': 'info'})

    title = doujinshi_info.find('h1').text
    subtitle = doujinshi_info.find('h2')

    doujinshi['name'] = title
    doujinshi['subtitle'] = subtitle.text if subtitle else ''

    doujinshi_cover = html.find('div', attrs={'id': 'cover'})
    img_id = re.search(r'/galleries/(\d+)/cover\.(jpg|png)$', doujinshi_cover.a.img.attrs['data-src'])

    ext = []
    for i in html.find_all('div', attrs={'class': 'thumb-container'}):
        _, ext_name = os.path.basename(i.img.attrs['data-src']).rsplit('.', 1)
        ext.append(ext_name)

    if not img_id:
        logger.critical('Failed to get the image id of this doujinshi')
        exit(1)

    doujinshi['img_id'] = img_id.group(1)
    doujinshi['ext'] = ext

    pages = 0
    for _ in doujinshi_info.find_all('div', class_=''):
        pages = re.search(r'(\d+) pages', _.text)
        if pages:
            pages = pages.group(1)
            break
    doujinshi['pages'] = int(pages)

    # gather the doujinshi's metadata fields
    information_fields = doujinshi_info.find_all('div', attrs={'class': 'field-name'})
    needed_fields = ['Characters', 'Artists', 'Languages', 'Tags']
    for field in information_fields:
        field_name = field.contents[0].strip().strip(':')
        if field_name in needed_fields:
            data = [sub_field.contents[0].strip() for sub_field in
                    field.find_all('a', attrs={'class': 'tag'})]
            doujinshi[field_name.lower()] = ', '.join(data)

    return doujinshi
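

# Search by keyword through the regular HTML search page and return a list of {'id', 'title'} dicts.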
def search_parser(keyword, page):
    logger.debug('Searching doujinshi with keyword {0}'.format(keyword))
    try:
        response = request('get', url=constant.SEARCH_URL, params={'q': keyword, 'page': page}).content
    except requests.ConnectionError as e:
        logger.critical(str(e))
        logger.warning('If you are in China, please configure the proxy to fu*k GFW.')
        raise SystemExit

    result = _get_title_and_id(response)
    if not result:
        logger.warning('Nothing found for keyword {}'.format(keyword))

    return result


def print_doujinshi(doujinshi_list):
    if not doujinshi_list:
        return

    doujinshi_list = [(i['id'], i['title']) for i in doujinshi_list]
    headers = ['id', 'doujinshi']
    logger.info('Search Result\n' +
                tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))
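

# Fetch up to max_page listing pages for a tag and collect the doujinshi found there.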
def tag_parser(tag_name, max_page=1):
    result = []
    tag_name = tag_name.lower()
    tag_name = tag_name.replace(' ', '-')

    for p in range(1, max_page + 1):
        logger.debug('Fetching page {0} for doujinshi with tag \'{1}\''.format(p, tag_name))
        response = request('get', url='%s/%s/?page=%d' % (constant.TAG_URL, tag_name, p)).content

        result += _get_title_and_id(response)
        if not result:
            logger.error('Cannot find any doujinshi with tag \'{0}\''.format(tag_name))
            return []

    if not result:
        logger.warning('No results for tag \'{}\''.format(tag_name))

    return result
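

# The three parsers below talk to the (currently suspended) JSON API instead of scraping HTML;
# they appear to be kept for reference only.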
def __api_suspended_search_parser(keyword, page):
    logger.debug('Searching doujinshi with keyword {0}'.format(keyword))
    result = []

    # retry the API request a few times before giving up
    for attempt in range(5):
        try:
            response = request('get', url=constant.SEARCH_URL, params={'query': keyword, 'page': page}).json()
            break
        except Exception as e:
            if attempt == 4:
                logger.critical(str(e))
                logger.warning('If you are in China, please configure the proxy to fu*k GFW.')
                exit(1)

    if 'result' not in response:
        raise Exception('No result in response')

    for row in response['result']:
        title = row['title']['english']
        title = title[:85] + '..' if len(title) > 85 else title
        result.append({'id': row['id'], 'title': title})

    if not result:
        logger.warning('No results for keyword {}'.format(keyword))

    return result


def __api_suspended_tag_parser(tag_id, max_page=1):
    logger.info('Searching for doujinshi with tag id {0}'.format(tag_id))
    result = []
    response = request('get', url=constant.TAG_API_URL, params={'sort': 'popular', 'tag_id': tag_id}).json()
    page = max_page if max_page <= response['num_pages'] else int(response['num_pages'])

    for i in range(1, page + 1):
        logger.info('Getting page {} ...'.format(i))

        if page != 1:
            response = request('get', url=constant.TAG_API_URL,
                               params={'sort': 'popular', 'tag_id': tag_id}).json()

        for row in response['result']:
            title = row['title']['english']
            title = title[:85] + '..' if len(title) > 85 else title
            result.append({'id': row['id'], 'title': title})

    if not result:
        logger.warning('No results for tag id {}'.format(tag_id))

    return result
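

# JSON-API variant of doujinshi_parser(): reads the same metadata from the API response
# instead of scraping the gallery HTML.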
def __api_suspended_doujinshi_parser(id_):
    if not (isinstance(id_, int) or (isinstance(id_, str) and id_.isdigit())):
        raise Exception('Doujinshi id({0}) is not valid'.format(id_))

    id_ = int(id_)
    logger.log(15, 'Fetching information of doujinshi id {0}'.format(id_))
    doujinshi = dict()
    doujinshi['id'] = id_
    url = '{0}/{1}'.format(constant.DETAIL_URL, id_)

    # retry the API request a few times before giving up
    for attempt in range(5):
        try:
            response = request('get', url).json()
            break
        except Exception as e:
            if attempt == 4:
                logger.critical(str(e))
                exit(1)

    doujinshi['name'] = response['title']['english']
    doujinshi['subtitle'] = response['title']['japanese']
    doujinshi['img_id'] = response['media_id']
    doujinshi['ext'] = ''.join(map(lambda s: s['t'], response['images']['pages']))
    doujinshi['pages'] = len(response['images']['pages'])

    # gather the doujinshi's metadata fields
    needed_fields = ['character', 'artist', 'language', 'tag']
    for tag in response['tags']:
        tag_type = tag['type']
        if tag_type in needed_fields:
            if tag_type == 'tag':
                if tag_type not in doujinshi:
                    doujinshi[tag_type] = {}

                tag['name'] = tag['name'].replace(' ', '-')
                tag['name'] = tag['name'].lower()
                doujinshi[tag_type][tag['name']] = tag['id']
            elif tag_type not in doujinshi:
                doujinshi[tag_type] = tag['name']
            else:
                doujinshi[tag_type] += ', ' + tag['name']

    return doujinshi
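

# Quick manual test: parse one known gallery id and print the resulting metadata dict.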
if __name__ == '__main__':
    print(doujinshi_parser("32271"))