-__version__ = '0.1.4'
+__version__ = '0.1.5'
__author__ = 'Ricter'
__email__ = 'ricterzheng@gmail.com'
if args.is_download and not args.id and not args.ids and not args.keyword:
logger.critical('Doujinshi id/ids is required for downloading')
parser.print_help()
- raise SystemExit
+ raise SystemExit(1)
if args.id:
args.ids = (args.id, ) if not args.ids else args.ids
if not args.keyword and not args.ids:
parser.print_help()
- raise SystemExit
+ raise SystemExit(1)
if args.threads <= 0:
args.threads = 1
elif args.threads > 10:
logger.critical('Maximum number of used threads is 10')
- raise SystemExit
+ raise SystemExit(1)
if args.proxy:
import urlparse
proxy_url = urlparse.urlparse(args.proxy)
if proxy_url.scheme not in ('http', 'https'):
- logger.error('Invalid protocol \'{}\' of proxy, ignored'.format(proxy_url.scheme))
+ logger.error('Invalid protocol \'{0}\' of proxy, ignored'.format(proxy_url.scheme))
else:
constant.PROXY = {proxy_url.scheme: args.proxy}
doujinshi_info = doujinshi_parser(id)
doujinshi_list.append(Doujinshi(**doujinshi_info))
else:
- raise SystemExit
+ raise SystemExit(1)
if options.is_download:
downloader = Downloader(path=options.saved_path,
def signal_handler(signal, frame):
logger.error('Ctrl-C signal received. Quit.')
- raise SystemExit
+ raise SystemExit(1)
signal.signal(signal.SIGINT, signal_handler)
self.info = DoujinshiInfo(**kwargs)
def __repr__(self):
- return '<Doujinshi: {}>'.format(self.name)
+ return '<Doujinshi: {0}>'.format(self.name)
def show(self):
table = [
["URL", self.url],
["Pages", self.pages],
]
- logger.info(u'Print doujinshi information\n{}'.format(tabulate(table)))
+ logger.info(u'Print doujinshi information\n{0}'.format(tabulate(table)))
def download(self):
logger.info('Start download doujinshi: %s' % self.name)
self.timeout = timeout
def _download(self, url, folder='', filename='', retried=False):
- logger.info('Start downloading: {} ...'.format(url))
+ logger.info('Start downloading: {0} ...'.format(url))
filename = filename if filename else os.path.basename(urlparse(url).path)
try:
with open(os.path.join(folder, filename), "wb") as f:
f.write(chunk)
except requests.HTTPError as e:
if not retried:
- logger.error('Error: {}, retrying'.format(str(e)))
+ logger.error('Error: {0}, retrying'.format(str(e)))
return self._download(url=url, folder=folder, filename=filename, retried=True)
else:
return None
def _download_callback(self, request, result):
if not result:
logger.critical('Too many errors occurred, quit.')
- raise SystemExit
- logger.log(15, '{} download successfully'.format(result))
+ raise SystemExit(1)
+ logger.log(15, '{0} download successfully'.format(result))
def download(self, queue, folder=''):
if not isinstance(folder, (str, unicode)):
folder = os.path.join(self.path, folder)
if not os.path.exists(folder):
- logger.warn('Path \'{}\' not exist.'.format(folder))
+ logger.warning('Path \'{0}\' not exist.'.format(folder))
try:
os.makedirs(folder)
except EnvironmentError as e:
- logger.critical('Error: {}'.format(str(e)))
- raise SystemExit
+ logger.critical('Error: {0}'.format(str(e)))
+ raise SystemExit(1)
else:
+ logger.warning('Path \'{0}\' already exist.'.format(folder))
+ logger.warn('Path \'{0}\' already exist.'.format(folder))
queue = [([url], {'folder': folder}) for url in queue]
def request(method, url, **kwargs):
if not hasattr(requests, method):
- raise AttributeError('\'requests\' object has no attribute \'{}\''.format(method))
+ raise AttributeError('\'requests\' object has no attribute \'{0}\''.format(method))
return requests.__dict__[method](url, proxies=constant.PROXY, **kwargs)
def doujinshi_parser(id_):
if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
- raise Exception('Doujinshi id({}) is not valid'.format(id_))
+ raise Exception('Doujinshi id({0}) is not valid'.format(id_))
id_ = int(id_)
- logger.log(15, 'Fetching doujinshi information of id {}'.format(id_))
+ logger.log(15, 'Fetching doujinshi information of id {0}'.format(id_))
doujinshi = dict()
doujinshi['id'] = id_
- url = '{}/{}/'.format(constant.DETAIL_URL, id_)
+ url = '{0}/{1}/'.format(constant.DETAIL_URL, id_)
try:
response = request('get', url).content
def search_parser(keyword, page):
- logger.debug('Searching doujinshis of keyword {}'.format(keyword))
+ logger.debug('Searching doujinshis of keyword {0}'.format(keyword))
result = []
try:
response = request('get', url=constant.SEARCH_URL, params={'q': keyword, 'page': page}).content
except requests.ConnectionError as e:
logger.critical(e)
logger.warn('If you are in China, please configure the proxy to fu*k GFW.')
- raise SystemExit
+ raise SystemExit(1)
html = BeautifulSoup(response)
doujinshi_search_result = html.find_all('div', attrs={'class': 'gallery'})
--- /dev/null
+[metadata]
+description-file = README.rst
+
+import codecs
from setuptools import setup, find_packages
from nhentai import __version__, __author__, __email__
with open('requirements.txt') as f:
requirements = [l for l in f.read().splitlines() if l]
+
+def long_description():
+ with codecs.open('README.rst', 'r') as f:
+ return f.read()
+
setup(
name='nhentai',
version=__version__,
author_email=__email__,
keywords='nhentai, doujinshi',
description='nhentai.net doujinshis downloader',
+ long_description=long_description(),
url='https://github.com/RicterZ/nhentai',
+ download_url='https://github.com/RicterZ/nhentai/tarball/master',
include_package_data=True,
zip_safe=False,