Move output serialization into a separate function. Add CSV support.

master
pictuga 2014-01-13 00:10:57 +01:00
parent 7fbe728f93
commit 4d6ef92504
1 changed file with 38 additions and 26 deletions

View File

@ -29,6 +29,7 @@ from gzip import GzipFile
from StringIO import StringIO from StringIO import StringIO
from readability import readability from readability import readability
from html2text import HTML2Text
LIM_ITEM = 100 # deletes what's beyond LIM_ITEM = 100 # deletes what's beyond
LIM_TIME = 7 # deletes what's after LIM_TIME = 7 # deletes what's after
@ -616,18 +617,6 @@ def Gather(rss, url, cache, options):
if not options.proxy: if not options.proxy:
Fill(item, cache, url) Fill(item, cache, url)
if 'al' in options:
if i+1 > int(options.al):
item.remove()
return
if item.desc and item.content:
if options.clip:
item.content = item.desc + "<br/><br/><center>* * *</center><br/><br/>" + item.content
del item.desc
if not options.keep:
del item.desc
queue = Queue.Queue() queue = Queue.Queue()
for i in range(THREADS): for i in range(THREADS):
@ -646,6 +635,39 @@ def Gather(rss, url, cache, options):
return rss return rss
def After(rss, options):
    """Post-process a fetched feed and serialize it.

    Applies per-item output options (item limit, desc/content merging,
    markdown conversion), then returns the whole feed rendered as JSON,
    CSV or XML depending on `options`.

    Parameters:
        rss     -- parsed feed object; exposes `items` and the
                   tojson()/tocsv()/tostring() serializers.
        options -- parsed request/CLI options (supports `in` checks and
                   attribute access; absent flags are falsy).

    Returns: the serialized feed as a string.
    """
    for i, item in enumerate(rss.items):
        # 'al' = article limit: drop every item past the requested count.
        if 'al' in options:
            if i+1 > int(options.al):
                # NOTE(review): item.remove() while iterating rss.items may
                # mutate the underlying list mid-iteration and skip items —
                # confirm rss.items is a snapshot or remove() is safe here.
                item.remove()
                continue

        if item.desc and item.content:
            if options.clip:
                # Merge the short description into the full content,
                # separated by a visual divider.
                item.content = item.desc + "<br/><br/><center>* * *</center><br/><br/>" + item.content
                del item.desc
            if not options.keep:
                # NOTE(review): when clip already deleted desc above, this
                # second `del item.desc` presumably relies on the feed
                # wrapper making deletion idempotent — verify.
                del item.desc

        # Optional HTML -> Markdown conversion of both text fields.
        if options.md:
            conv = HTML2Text(baseurl=item.link)
            conv.unicode_snob = True  # keep unicode chars instead of ASCII approximations
            if item.desc:
                item.desc = conv.handle(item.desc)
            if item.content:
                item.content = conv.handle(item.content)

    # Serialize: JSON (optionally pretty-printed), CSV, or XML (default).
    if options.json:
        if options.indent:
            return rss.tojson(indent=4)
        else:
            return rss.tojson()
    elif options.csv:
        return rss.tocsv()
    else:
        return rss.tostring(xml_declaration=True, encoding='UTF-8')
def cgi_app(environ, start_response): def cgi_app(environ, start_response):
options = ParseOptions(environ) options = ParseOptions(environ)
url = options.url url = options.url
@ -671,6 +693,8 @@ def cgi_app(environ, start_response):
headers['content-type'] = 'text/plain' headers['content-type'] = 'text/plain'
elif options.json: elif options.json:
headers['content-type'] = 'application/json' headers['content-type'] = 'application/json'
elif options.csv:
headers['content-type'] = 'text/csv'
else: else:
headers['content-type'] = 'text/xml' headers['content-type'] = 'text/xml'
@ -684,13 +708,7 @@ def cgi_app(environ, start_response):
start_response(headers['status'], headers.items()) start_response(headers['status'], headers.items())
if not DEBUG and not options.silent: if not DEBUG and not options.silent:
if options.json: return After(RSS, options)
if options.indent:
return json.dumps(RSS, sort_keys=True, indent=4, default=lambda x: dict(x))
else:
return json.dumps(RSS, sort_keys=True, default=lambda x: dict(x))
else:
return RSS.tostring(xml_declaration=True, encoding='UTF-8')
log('done') log('done')
@ -724,13 +742,7 @@ def cli_app():
RSS = Gather(RSS, url, cache, options) RSS = Gather(RSS, url, cache, options)
if not DEBUG and not options.silent: if not DEBUG and not options.silent:
if options.json: print After(RSS, options)
if options.indent:
print json.dumps(RSS, sort_keys=True, indent=4, default=lambda x: dict(x))
else:
print json.dumps(RSS, sort_keys=True, default=lambda x: dict(x))
else:
print RSS.tostring(xml_declaration=True, encoding='UTF-8')
log('done') log('done')