Compare commits

..

7 Commits

4 changed files with 60 additions and 39 deletions

View File

@@ -85,10 +85,10 @@ def adv_get(url, timeout=None, *args, **kwargs):
url = sanitize_url(url)
if timeout is None:
con = custom_handler(*args, **kwargs).open(url)
con = custom_opener(*args, **kwargs).open(url)
else:
con = custom_handler(*args, **kwargs).open(url, timeout=timeout)
con = custom_opener(*args, **kwargs).open(url, timeout=timeout)
data = con.read()
@@ -104,7 +104,7 @@ def adv_get(url, timeout=None, *args, **kwargs):
}
def custom_handler(follow=None, delay=None):
def custom_opener(follow=None, delay=None):
handlers = []
    # as per urllib2 source code, these Handlers are added first
@@ -346,6 +346,8 @@ class BrowserlyHeaderHandler(BaseHandler):
def iter_html_tag(html_str, tag_name):
" To avoid parsing whole pages when looking for a simple tag "
re_tag = r'<%s(\s*[^>])*>' % tag_name
re_attr = r'(?P<key>[^=\s]+)=[\'"](?P<value>[^\'"]+)[\'"]'

View File

@@ -88,16 +88,21 @@ def parse_rules(filename=None):
return rules
def parse(data, url=None, encoding=None):
def parse(data, url=None, encoding=None, ruleset=None):
" Determine which ruleset to use "
rulesets = parse_rules()
if ruleset is not None:
rulesets = [ruleset]
else:
rulesets = parse_rules().values()
parsers = [FeedXML, FeedHTML, FeedJSON]
# 1) Look for a ruleset based on path
if url is not None:
for ruleset in rulesets.values():
for ruleset in rulesets:
if 'path' in ruleset:
for path in ruleset['path']:
if fnmatch(url, path):
@@ -111,9 +116,6 @@ def parse(data, url=None, encoding=None):
# 3b) See if .items matches anything
for parser in parsers:
ruleset_candidates = [x for x in rulesets.values() if x['mode'] == parser.mode and 'path' not in x]
    # 'path' as they should have been caught beforehand
try:
feed = parser(data, encoding=encoding)
@@ -124,13 +126,17 @@ def parse(data, url=None, encoding=None):
else:
# parsing worked, now we try the rulesets
ruleset_candidates = [x for x in rulesets if x.get('mode', None) in (parser.mode, None) and 'path' not in x]
    # 'path' as they should have been caught beforehand
# try anyway if no 'mode' specified
for ruleset in ruleset_candidates:
feed.rules = ruleset
try:
feed.items[0]
except (AttributeError, IndexError):
except (AttributeError, IndexError, TypeError):
# parsing and or item picking did not work out
pass
@@ -456,7 +462,7 @@ class ParserXML(ParserBase):
def rule_str(self, rule):
match = self.rule_search(rule)
html_rich = ('atom' in rule or self.rules['mode'] == 'html') \
html_rich = ('atom' in rule or self.mode == 'html') \
and rule in [self.rules.get('item_desc'), self.rules.get('item_content')]
if isinstance(match, etree._Element):

View File

@@ -96,7 +96,7 @@ class Options:
return self.options[key]
else:
return False
return None
def __setitem__(self, key, value):
self.options[key] = value
@@ -104,6 +104,13 @@ class Options:
def __contains__(self, key):
return key in self.options
def get(self, key, default=None):
    " Look up *key* among the stored options; fall back to *default* when it is absent "
    if key not in self.options:
        return default
    return self.options[key]
def ItemFix(item, options, feedurl='/'):
""" Improves feed items (absolute links, resolve feedburner links, etc) """
@@ -276,22 +283,23 @@ def FeedFetch(url, options):
if options.items:
# using custom rules
rss = feeds.FeedHTML(req['data'], encoding=req['encoding'])
ruleset = {}
rss.rules['title'] = options.title if options.title else '//head/title'
rss.rules['desc'] = options.desc if options.desc else '//head/meta[@name="description"]/@content'
ruleset['items'] = options.items
rss.rules['items'] = options.items
ruleset['title'] = options.get('title', '//head/title')
ruleset['desc'] = options.get('desc', '//head/meta[@name="description"]/@content')
rss.rules['item_title'] = options.item_title if options.item_title else '.'
rss.rules['item_link'] = options.item_link if options.item_link else './@href|.//a/@href|ancestor::a/@href'
ruleset['item_title'] = options.get('item_title', '.')
ruleset['item_link'] = options.get('item_link', './@href|.//a/@href|ancestor::a/@href')
if options.item_content:
rss.rules['item_content'] = options.item_content
ruleset['item_content'] = options.item_content
if options.item_time:
rss.rules['item_time'] = options.item_time
ruleset['item_time'] = options.item_time
rss = feeds.parse(req['data'], encoding=req['encoding'], ruleset=ruleset)
rss = rss.convert(feeds.FeedXML)
else:

View File

@@ -50,7 +50,7 @@ def parse_options(options):
split = option.split('=', 1)
if len(split) > 1:
out[split[0]] = split[1]
out[split[0]] = unquote(split[1]).replace('|', '/') # | -> / for backward compatibility (and Apache)
else:
out[split[0]] = True
@@ -58,14 +58,18 @@ def parse_options(options):
return out
def get_path(environ):
def request_uri(environ):
if 'REQUEST_URI' in environ:
# when running on Apache
url = unquote(environ['REQUEST_URI'][1:])
# when running on Apache/uwsgi
url = environ['REQUEST_URI']
elif 'RAW_URI' in environ:
# gunicorn
url = environ['RAW_URI']
else:
# when using internal server
url = environ['PATH_INFO'][1:]
# when using other servers
url = environ['PATH_INFO']
if environ['QUERY_STRING']:
url += '?' + environ['QUERY_STRING']
@@ -76,19 +80,13 @@ def get_path(environ):
def cgi_parse_environ(environ):
# get options
url = get_path(environ)
url = re.sub(r'^/?(cgi/)?(morss.py|main.py)/', '', url)
url = request_uri(environ)[1:]
url = re.sub(r'^(cgi/)?(morss.py|main.py)/', '', url)
if url.startswith(':'):
split = url.split('/', 1)
raw_options = split[0].replace('|', '/').replace('\\\'', '\'').split(':')[1:]
if len(split) > 1:
url = split[1]
else:
url = ''
parts = url.split('/', 1)
raw_options = parts[0].split(':')[1:]
url = parts[1] if len(parts) > 1 else ''
else:
raw_options = []
@@ -164,7 +162,7 @@ def middleware(func):
def cgi_file_handler(environ, start_response, app):
" Simple HTTP server to serve static files (.html, .css, etc.) "
url = get_path(environ)
url = request_uri(environ)[1:]
if url == '':
url = 'index.html'
@@ -283,11 +281,18 @@ def cgi_handle_request():
wsgiref.handlers.CGIHandler().run(app)
class WSGIRequestHandlerRequestUri(wsgiref.simple_server.WSGIRequestHandler):
    " Request handler that puts REQUEST_URI into the WSGI environ, like Apache/uwsgi do "
    def get_environ(self):
        env = super().get_environ()
        # NOTE(review): self.path is the request target taken from the HTTP
        # request line by the base handler — presumably still percent-encoded;
        # downstream code (request_uri) reads environ['REQUEST_URI'] first.
        env['REQUEST_URI'] = self.path
        return env
def cgi_start_server():
    " Run the stdlib WSGI dev server on PORT, serving the morss application "
    crawler.default_cache.autotrim()
    print('Serving http://localhost:%s/' % PORT)
    # Removed the duplicated, immediately-overwritten make_server() call (dead
    # assignment left over from the diff). Keep only the variant that passes
    # handler_class so REQUEST_URI is injected into the environ, matching the
    # behaviour under Apache/uwsgi/gunicorn.
    httpd = wsgiref.simple_server.make_server('', PORT, application, handler_class=WSGIRequestHandlerRequestUri)
    httpd.serve_forever()