searx_ynh/sources/searx/utils.py

233 lines
6.2 KiB
Python
Raw Normal View History

2015-01-13 17:13:08 +01:00
import cStringIO
import csv
import os
import re
2015-09-08 23:05:37 +02:00
from babel.dates import format_date
2014-05-10 11:56:19 +02:00
from codecs import getincrementalencoder
2014-12-01 12:26:38 +01:00
from HTMLParser import HTMLParser
from random import choice
from searx.version import VERSION_STRING
from searx import settings
2015-01-13 17:13:08 +01:00
from searx import logger
2014-12-01 12:26:38 +01:00
2015-01-13 17:13:08 +01:00
logger = logger.getChild('utils')
2014-05-10 11:56:19 +02:00
2015-09-08 23:05:37 +02:00
# Firefox release versions impersonated by gen_useragent(); kept roughly
# in step with real releases so outgoing requests blend in.
ua_versions = ('33.0', '34.0', '35.0', '36.0', '37.0', '38.0', '39.0', '40.0')

# Operating-system tokens used in the fabricated user agent string.
ua_os = ('Windows NT 6.3; WOW64',
         'X11; Linux x86_64',
         'X11; Linux x86')

# Firefox-style user agent template; both placeholders are filled in
# by gen_useragent().
ua = "Mozilla/5.0 ({os}; rv:{version}) Gecko/20100101 Firefox/{version}"

# Tags whose text content HTMLTextExtractor discards entirely.
blocked_tags = ('script',
                'style')
2014-05-10 11:56:19 +02:00
def gen_useragent():
    """Return a randomized, Firefox-like User-Agent header value."""
    # TODO
    os_string = choice(ua_os)
    version = choice(ua_versions)
    return ua.format(os=os_string, version=version)
2014-12-01 12:26:38 +01:00
def searx_useragent():
    """Return the User-Agent searx itself presents to upstream engines."""
    suffix = settings['outgoing'].get('useragent_suffix', '')
    return 'searx/{searx_version} {suffix}'.format(
        searx_version=VERSION_STRING,
        suffix=suffix)
2014-12-01 12:26:38 +01:00
2014-05-10 11:56:19 +02:00
def highlight_content(content, query):
    """Wrap occurrences of *query* in *content* with a highlight ``<span>``.

    Returns ``None`` for empty content; content that looks like HTML is
    returned untouched to avoid corrupting markup.
    """
    if not content:
        return None
    # ignoring html contents
    # TODO better html content detection
    if content.find('<') != -1:
        return content

    query = query.decode('utf-8')
    if content.lower().find(query.lower()) > -1:
        # The whole query occurs verbatim -- highlight it as one unit.
        query_regex = u'({0})'.format(re.escape(query))
    else:
        # Otherwise highlight each word of the query separately; single
        # characters only match when bounded by non-word characters.
        regex_parts = []
        for chunk in query.split():
            if len(chunk) == 1:
                regex_parts.append(u'\W+{0}\W+'.format(re.escape(chunk)))
            else:
                regex_parts.append(u'{0}'.format(re.escape(chunk)))
        query_regex = u'({0})'.format('|'.join(regex_parts))

    return re.sub(query_regex, '<span class="highlight">\\1</span>',
                  content, flags=re.I | re.U)
class HTMLTextExtractor(HTMLParser):
    """SAX-style parser that accumulates the text content of an HTML snippet.

    Text inside ``blocked_tags`` (script/style) is discarded; a closing tag
    that does not match the innermost open tag raises an exception so that
    malformed markup is rejected.
    """

    def __init__(self):
        HTMLParser.__init__(self)
        self.result = []   # collected text fragments, joined by get_text()
        self.tags = []     # stack of currently open tag names

    def handle_starttag(self, tag, attrs):
        self.tags.append(tag)

    def handle_endtag(self, tag):
        if not self.tags:
            return

        if tag != self.tags[-1]:
            raise Exception("invalid html")

        self.tags.pop()

    def is_valid_tag(self):
        # Text is kept unless the innermost open tag is a blocked one.
        return not self.tags or self.tags[-1] not in blocked_tags

    def handle_data(self, d):
        if self.is_valid_tag():
            self.result.append(d)

    def handle_charref(self, number):
        if not self.is_valid_tag():
            return
        # Numeric references come in decimal ("&#65;") or hex ("&#x41;") form.
        if number[0] in (u'x', u'X'):
            codepoint = int(number[1:], 16)
        else:
            codepoint = int(number)
        self.result.append(unichr(codepoint))

    def handle_entityref(self, name):
        if self.is_valid_tag():
            # codepoint = htmlentitydefs.name2codepoint[name]
            # self.result.append(unichr(codepoint))
            self.result.append(name)

    def get_text(self):
        return u''.join(self.result).strip()
2014-05-10 11:56:19 +02:00
def html_to_text(html):
    """Strip markup from *html* and return its whitespace-collapsed text."""
    # Collapse newlines and runs of whitespace into single spaces first.
    normalized = html.replace('\n', ' ')
    normalized = ' '.join(normalized.split())

    extractor = HTMLTextExtractor()
    extractor.feed(normalized)
    return extractor.get_text()
class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.

    NOTE(review): Python 2 only -- relies on ``cStringIO`` and the
    ``unicode`` builtin, both of which were removed in Python 3.
    """

    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue: rows are first serialized as UTF-8
        # into this in-memory buffer, then re-encoded for the real stream.
        self.queue = cStringIO.StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        # Incremental encoder for the target encoding (keeps codec state
        # across successive writerow() calls for stateful encodings).
        self.encoder = getincrementalencoder(encoding)()

    def writerow(self, row):
        # Encode text cells to UTF-8 byte strings before handing them to
        # the csv module, which cannot handle unicode objects in Python 2.
        # NOTE(review): ``str.encode('utf-8')`` on a non-ASCII byte string
        # implicitly decodes with ASCII first and would raise -- assumes
        # plain-str cells are ASCII-only; confirm against callers.
        unicode_row = []
        for col in row:
            if type(col) == str or type(col) == unicode:
                unicode_row.append(col.encode('utf-8').strip())
            else:
                unicode_row.append(col)
        self.writer.writerow(unicode_row)
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)

    def writerows(self, rows):
        # Write a sequence of rows via one writerow() call each.
        for row in rows:
            self.writerow(row)
2014-12-01 12:26:38 +01:00
def get_themes(root):
    """Return ``(static_path, templates_path, themes)`` found under *root*.

    Themes are simply the directory entries of ``static/themes``.
    """
    static_path = os.path.join(root, 'static')
    templates_path = os.path.join(root, 'templates')

    themes = os.listdir(os.path.join(static_path, 'themes'))

    return static_path, templates_path, themes
2015-01-13 17:13:08 +01:00
def get_static_files(base_path):
    """Return the set of every file path below ``base_path/static``,
    expressed relative to that directory."""
    static_root = os.path.join(base_path, 'static')
    prefix_len = len(static_root) + 1  # +1 also strips the path separator
    static_files = set()
    for current_dir, _, filenames in os.walk(static_root):
        rel_dir = current_dir[prefix_len:]
        static_files.update(
            os.path.join(rel_dir, name) for name in filenames)
    return static_files
def get_result_templates(base_path):
    """Return the set of template files inside any ``result_templates``
    directory below ``base_path/templates``, relative to that directory."""
    templates_root = os.path.join(base_path, 'templates')
    prefix_len = len(templates_root) + 1  # +1 also strips the path separator
    result_templates = set()
    for current_dir, _, filenames in os.walk(templates_root):
        # Only directories whose path ends in "result_templates" contribute.
        if not current_dir.endswith('result_templates'):
            continue
        result_templates.update(
            os.path.join(current_dir[prefix_len:], name)
            for name in filenames)
    return result_templates
2015-09-08 23:05:37 +02:00
def format_date_by_locale(date, locale_string):
    """Render *date* for *locale_string* ('all' means the configured default)."""
    # strftime works only on dates after 1900, so older dates fall back
    # to the plain ISO date portion.
    if date.year <= 1900:
        return date.isoformat().partition('T')[0]

    if locale_string == 'all':
        locale_string = settings['ui']['default_locale'] or 'en_US'

    return format_date(date, locale=locale_string)
2015-02-09 13:30:16 +01:00
def dict_subset(d, properties):
    """Return a new dict holding only the *properties* keys present in *d*."""
    return {key: d[key] for key in properties if key in d}
2015-09-08 23:05:37 +02:00
def prettify_url(url, max_length=74):
    """Shorten *url* for display.

    URLs longer than *max_length* characters are abbreviated by keeping
    the head and the tail and inserting ``[...]`` between them; shorter
    URLs are returned unchanged.
    """
    if len(url) > max_length:
        # Floor division keeps chunk_len an int: plain `/` yields a float
        # under Python 3 and would break the slices below. Identical
        # result under Python 2 integer division.
        chunk_len = max_length // 2 + 1
        return u'{0}[...]{1}'.format(url[:chunk_len], url[-chunk_len:])
    else:
        return url
2015-09-08 23:05:37 +02:00
# get element in list or default value
def list_get(a_list, index, default=None):
    """Return ``a_list[index]`` when the list is long enough, else *default*."""
    return a_list[index] if index < len(a_list) else default