help='slow down between downloading every doujinshi')
parser.add_option('--proxy', type='string', dest='proxy', action='store',
help='store a proxy, for example: -p \'http://127.0.0.1:1080\'')
- parser.add_option('--file', '-f', type='string', dest='file', action='store', help='read gallery IDs from file.')
+ parser.add_option('--file', '-f', type='string', dest='file', action='store', help='read gallery IDs from file.')
parser.add_option('--format', type='string', dest='name_format', action='store',
help='format the saved folder name', default='[%i][%a][%t]')
parser.add_option('--dry-run', '-r', action='store_true', dest='dryrun', help='Dry run, skip file download.')
help='generate PDF file')
parser.add_option('--rm-origin-dir', dest='rm_origin_dir', action='store_true', default=False,
help='remove downloaded doujinshi dir when generated CBZ or PDF file.')
+ parser.add_option('--meta', dest='generate_metadata', action='store_true',
+ help='generate a metadata file in doujinshi format')
# nhentai options
parser.add_option('--cookie', type='str', dest='cookie', action='store',
parser.print_help()
exit(1)
- if not args.keyword and not args.id and not args.favorites:
+ if not args.keyword and not args.id and not args.favorites:
parser.print_help()
exit(1)
from nhentai.downloader import Downloader
from nhentai.logger import logger
from nhentai.constant import BASE_URL
-from nhentai.utils import generate_html, generate_cbz, generate_main_html, generate_pdf, \
+from nhentai.utils import generate_html, generate_cbz, generate_main_html, generate_pdf, generate_metadata_file, \
paging, check_cookie, signal_handler, DB
doujinshi.downloader = downloader
doujinshi.download()
+ doujinshi.downloader = downloader
+ doujinshi.download()
+
+ if options.generate_metadata:
+ table = doujinshi.table
+ generate_metadata_file(options.output_dir, table, doujinshi)
+
if options.is_save_download_history:
with DB() as db:
db.add_one(doujinshi.id)
signal.signal(signal.SIGINT, signal_handler)
-
if __name__ == '__main__':
main()
from nhentai.logger import logger
from nhentai.utils import format_filename
-
EXT_MAP = {
'j': 'jpg',
'p': 'png',
name_format = name_format.replace('%s', self.info.subtitle)
self.filename = format_filename(name_format)
- def __repr__(self):
- return '<Doujinshi: {0}>'.format(self.name)
-
- def show(self):
- table = [
+ self.table = [
["Parodies", self.info.parodies],
["Doujinshi", self.name],
["Subtitle", self.info.subtitle],
["URL", self.url],
["Pages", self.pages],
]
- logger.info(u'Print doujinshi information of {0}\n{1}'.format(self.id, tabulate(table)))
+
+ def __repr__(self):
+ return '<Doujinshi: {0}>'.format(self.name)
+
+ def show(self):
+
+ logger.info(u'Print doujinshi information of {0}\n{1}'.format(self.id, tabulate(self.table)))
def download(self):
logger.info('Starting to download doujinshi: %s' % self.name)
logger.warning('Page count and ext count do not equal')
for i in range(1, min(self.pages, len(self.ext)) + 1):
- download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i, self.ext[i-1]))
+ download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i, self.ext[i - 1]))
self.downloader.download(download_queue, self.filename)
else:
if not os.path.splitext(image)[1] in ('.jpg', '.png'):
continue
- image_html += '<img src="{0}" class="image-item"/>\n'\
+ image_html += '<img src="{0}" class="image-item"/>\n' \
.format(image)
html = readfile('viewer/{}/index.html'.format(template))
css = readfile('viewer/{}/styles.css'.format(template))
else:
with open('./main.html', 'wb') as f:
f.write(data.encode('utf-8'))
- shutil.copy(os.path.dirname(__file__)+'/viewer/logo.png', './')
+ shutil.copy(os.path.dirname(__file__) + '/viewer/logo.png', './')
set_js_database()
logger.log(
15, 'Main Viewer has been written to \'{0}main.html\''.format(output_dir))
except ImportError:
logger.error("Please install img2pdf package by using pip.")
+
def unicode_truncate(s, length, encoding='utf-8'):
"""https://stackoverflow.com/questions/1809531/truncating-unicode-so-it-fits-a-maximum-size-when-encoded-for-wire-transfer
"""
"""
# maybe you can use `--format` to select a suitable filename
ban_chars = '\\\'/:,;*?"<>|\t'
- filename = s.translate(str.maketrans(ban_chars, ' '*len(ban_chars))).strip()
+ filename = s.translate(str.maketrans(ban_chars, ' ' * len(ban_chars))).strip()
filename = ' '.join(filename.split())
while filename.endswith('.'):
start, end = i.split('-')
if not (start.isdigit() and end.isdigit()):
raise Exception('Invalid page number')
- page_list.extend(list(range(int(start), int(end)+1)))
+ page_list.extend(list(range(int(start), int(end) + 1)))
else:
if not i.isdigit():
raise Exception('Invalid page number')
return page_list
def generate_metadata_file(output_dir, table, doujinshi_obj=None):
    """Write an ``info.txt`` metadata file for a downloaded doujinshi.

    :param output_dir: base output directory for downloads.
    :param table: list of ``[label, value]`` rows (``doujinshi.table``); row
        order is assumed to be Parodies, Doujinshi, Subtitle, Characters,
        Authors, Languages, Tags, URL, Pages — TODO confirm against
        ``Doujinshi.table`` construction.
    :param doujinshi_obj: optional Doujinshi instance; when given, the file is
        written into that doujinshi's download folder, otherwise into the
        current working directory.
    """
    logger.info('Writing Metadata Info')

    if doujinshi_obj is not None:
        doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
    else:
        doujinshi_dir = '.'

    logger.info(doujinshi_dir)

    fields = ['TITLE', 'ORIGINAL TITLE', 'AUTHOR', 'ARTIST', 'CIRCLE', 'SCANLATOR',
              'TRANSLATOR', 'PUBLISHER', 'DESCRIPTION', 'STATUS', 'CHAPTERS', 'PAGES',
              'TAGS', 'TYPE', 'LANGUAGE', 'RELEASED', 'READING DIRECTION', 'CHARACTERS',
              'SERIES', 'PARODY', 'URL']
    # Fields whose value comes from `table`: the position of the field name in
    # this list is used as the row index into `table` (positional coupling
    # with Doujinshi.table — keep the two in sync).
    special_fields = ['PARODY', 'TITLE', 'ORIGINAL TITLE', 'CHARACTERS', 'AUTHOR',
                      'LANGUAGE', 'TAGS', 'URL', 'PAGES']

    # `with` guarantees the handle is closed even if a write raises
    # (original opened/closed manually and leaked the file on error).
    with open(os.path.join(doujinshi_dir, 'info.txt'), 'w', encoding='utf-8') as f:
        for field in fields:
            f.write('{}: '.format(field))
            if field in special_fields:
                f.write(str(table[special_fields.index(field)][1]))
            f.write('\n')
+
+
class DB(object):
conn = None
cur = None