commit ecf52785ae
parent 5bcc6bcd65

update version 0.5

@@ -27,3 +27,5 @@ generally made searx better:
 - Martin Zimmermann
 - @courgette
 - @kernc
+- @Cqoicebordel
+- @Reventl0v

@@ -20,6 +20,7 @@ $(python):
 
 tests: .installed.cfg
 	@bin/test
+	@grunt test --gruntfile searx/static/oscar/gruntfile.js
 
 robot: .installed.cfg
 	@bin/robot
@@ -48,6 +49,9 @@ styles:
 	@lessc -x searx/static/oscar/less/bootstrap/bootstrap.less > searx/static/oscar/css/bootstrap.min.css
 	@lessc -x searx/static/oscar/less/oscar/oscar.less > searx/static/oscar/css/oscar.min.css
 
+grunt:
+	@grunt --gruntfile searx/static/oscar/gruntfile.js
+
 locales:
 	@pybabel compile -d searx/translations
 
@@ -1,6 +1,6 @@
 flask
 flask-babel
-grequests
+requests
 lxml
 pyyaml
 python-dateutil

@@ -1,8 +1,9 @@
 ## Bing (Web)
-#
+#
 # @website https://www.bing.com
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), max. 5000 query/month
-#
+# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+# max. 5000 query/month
+#
 # @using-api no (because of query limit)
 # @results HTML (using search portal)
 # @stable no (HTML can change)
@@ -58,8 +59,8 @@ def response(resp):
         content = escape(' '.join(result.xpath('.//p//text()')))
 
         # append result
-        results.append({'url': url,
-                        'title': title,
+        results.append({'url': url,
+                        'title': title,
                         'content': content})
 
     # return results if something is found
@@ -74,8 +75,8 @@ def response(resp):
         content = escape(' '.join(result.xpath('.//p//text()')))
 
         # append result
-        results.append({'url': url,
-                        'title': title,
+        results.append({'url': url,
+                        'title': title,
                         'content': content})
 
     # return results

@@ -1,17 +1,19 @@
 ## Bing (Images)
-#
+#
 # @website https://www.bing.com/images
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), max. 5000 query/month
-#
+# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+# max. 5000 query/month
+#
 # @using-api no (because of query limit)
 # @results HTML (using search portal)
 # @stable no (HTML can change)
 # @parse url, title, img_src
 #
-# @todo currently there are up to 35 images receive per page, because bing does not parse count=10. limited response to 10 images
+# @todo currently there are up to 35 images receive per page,
+# because bing does not parse count=10.
+# limited response to 10 images
 
 from urllib import urlencode
 from cgi import escape
 from lxml import html
 from yaml import load
 import re
@@ -51,15 +53,15 @@ def response(resp):
     dom = html.fromstring(resp.content)
 
     # init regex for yaml-parsing
-    p = re.compile( '({|,)([a-z]+):(")')
+    p = re.compile('({|,)([a-z]+):(")')
 
     # parse results
     for result in dom.xpath('//div[@class="dg_u"]'):
         link = result.xpath('./a')[0]
 
         # parse yaml-data (it is required to add a space, to make it parsable)
-        yaml_data = load(p.sub( r'\1\2: \3', link.attrib.get('m')))
+        yaml_data = load(p.sub(r'\1\2: \3', link.attrib.get('m')))
 
         title = link.attrib.get('t1')
         #url = 'http://' + link.attrib.get('t3')
         url = yaml_data.get('surl')
@@ -69,7 +71,7 @@ def response(resp):
         results.append({'template': 'images.html',
                         'url': url,
                         'title': title,
-                        'content': '',
+                        'content': '',
                         'img_src': img_src})
 
         # TODO stop parsing if 10 images are found

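Note on the regex hunk above: Bing's image links carry an `m` attribute whose value is almost-JSON (`{surl:"...",imgurl:"..."}` with no space after the colons), so the engine rewrites it into a valid YAML flow mapping before parsing — that is what the "add a space" comment refers to. A minimal standalone sketch of the same trick (the metadata string here is made up, and safe_load stands in for the engine's bare load):

    import re
    from yaml import safe_load

    # insert a space after each bare key so YAML accepts the flow mapping
    p = re.compile('({|,)([a-z]+):(")')

    raw = '{surl:"https://example.org/page",imgurl:"https://example.org/img.jpg"}'
    data = safe_load(p.sub(r'\1\2: \3', raw))
    print(data['surl'])  # -> https://example.org/page
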
@@ -1,8 +1,9 @@
 ## Bing (News)
-#
+#
 # @website https://www.bing.com/news
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search), max. 5000 query/month
-#
+# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+# max. 5000 query/month
+#
 # @using-api no (because of query limit)
 # @results HTML (using search portal)
 # @stable no (HTML can change)
@@ -57,12 +58,12 @@ def response(resp):
         url = link.attrib.get('href')
         title = ' '.join(link.xpath('.//text()'))
         contentXPath = result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()')
-        if contentXPath != None:
+        if contentXPath is not None:
             content = escape(' '.join(contentXPath))
 
 
         # parse publishedDate
         publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div//span[contains(@class,"sn_ST")]//span[contains(@class,"sn_tm")]//text()')
-        if publishedDateXPath != None:
+        if publishedDateXPath is not None:
             publishedDate = escape(' '.join(publishedDateXPath))
 
         if re.match("^[0-9]+ minute(s|) ago$", publishedDate):
@@ -89,10 +90,10 @@ def response(resp):
         except TypeError:
             # FIXME
             publishedDate = datetime.now()
 
 
         # append result
-        results.append({'url': url,
-                        'title': title,
+        results.append({'url': url,
+                        'title': title,
                         'publishedDate': publishedDate,
                         'content': content})
 
@@ -55,6 +55,6 @@ def response(resp):
         resp.search_params['to'].lower()
     )
 
-    results.append({'answer' : answer, 'url': url})
+    results.append({'answer': answer, 'url': url})
 
     return results

@@ -1,8 +1,8 @@
 ## Dailymotion (Videos)
-#
+#
 # @website https://www.dailymotion.com
 # @provide-api yes (http://www.dailymotion.com/developer)
-#
+#
 # @using-api yes
 # @results JSON
 # @stable yes
@@ -12,7 +12,6 @@
 
 from urllib import urlencode
 from json import loads
-from lxml import html
 
 # engine dependent config
 categories = ['videos']

@@ -1,8 +1,8 @@
 ## Deviantart (Images)
-#
+#
 # @website https://www.deviantart.com/
 # @provide-api yes (https://www.deviantart.com/developers/) (RSS)
-#
+#
 # @using-api no (TODO, rewrite to api)
 # @results HTML
 # @stable no (HTML can change)

@@ -1,15 +1,17 @@
 ## DuckDuckGo (Web)
-#
+#
 # @website https://duckduckgo.com/
-# @provide-api yes (https://duckduckgo.com/api), but not all results from search-site
-#
+# @provide-api yes (https://duckduckgo.com/api),
+# but not all results from search-site
+#
 # @using-api no
 # @results HTML (using search portal)
 # @stable no (HTML can change)
 # @parse url, title, content
 #
 # @todo rewrite to api
-# @todo language support (the current used site does not support language-change)
+# @todo language support
+# (the current used site does not support language-change)
 
 from urllib import urlencode
 from lxml.html import fromstring
@@ -37,7 +39,7 @@ def request(query, params):
     if params['language'] == 'all':
         locale = 'en-us'
     else:
-        locale = params['language'].replace('_','-').lower()
+        locale = params['language'].replace('_', '-').lower()
 
     params['url'] = url.format(
         query=urlencode({'q': query, 'kl': locale}),

@@ -3,21 +3,25 @@ from urllib import urlencode
 from lxml import html
 from searx.engines.xpath import extract_text
 
-url = 'https://api.duckduckgo.com/?{query}&format=json&pretty=0&no_redirect=1&d=1'
+url = 'https://api.duckduckgo.com/'\
+    + '?{query}&format=json&pretty=0&no_redirect=1&d=1'
 
 
 def result_to_text(url, text, htmlResult):
     # TODO : remove result ending with "Meaning" or "Category"
     dom = html.fromstring(htmlResult)
     a = dom.xpath('//a')
-    if len(a)>=1:
+    if len(a) >= 1:
         return extract_text(a[0])
     else:
         return text
 
 
 def html_to_text(htmlFragment):
     dom = html.fromstring(htmlFragment)
     return extract_text(dom)
 
 
 def request(query, params):
     # TODO add kl={locale}
     params['url'] = url.format(query=urlencode({'q': query}))
@@ -38,16 +42,15 @@ def response(resp):
     # add answer if there is one
     answer = search_res.get('Answer', '')
     if answer != '':
-        results.append({ 'answer' : html_to_text(answer) })
+        results.append({'answer': html_to_text(answer)})
 
     # add infobox
     if 'Definition' in search_res:
-        content = content + search_res.get('Definition', '')
+        content = content + search_res.get('Definition', '')
 
     if 'Abstract' in search_res:
         content = content + search_res.get('Abstract', '')
 
 
     # image
     image = search_res.get('Image', '')
     image = None if image == '' else image
@@ -55,29 +58,35 @@ def response(resp):
     # attributes
     if 'Infobox' in search_res:
         infobox = search_res.get('Infobox', None)
-        if 'content' in infobox:
+        if 'content' in infobox:
             for info in infobox.get('content'):
-                attributes.append({'label': info.get('label'), 'value': info.get('value')})
+                attributes.append({'label': info.get('label'),
+                                   'value': info.get('value')})
 
     # urls
     for ddg_result in search_res.get('Results', []):
         if 'FirstURL' in ddg_result:
             firstURL = ddg_result.get('FirstURL', '')
             text = ddg_result.get('Text', '')
-            urls.append({'title':text, 'url':firstURL})
-            results.append({'title':heading, 'url': firstURL})
+            urls.append({'title': text, 'url': firstURL})
+            results.append({'title': heading, 'url': firstURL})
 
     # related topics
     for ddg_result in search_res.get('RelatedTopics', None):
         if 'FirstURL' in ddg_result:
-            suggestion = result_to_text(ddg_result.get('FirstURL', None), ddg_result.get('Text', None), ddg_result.get('Result', None))
+            suggestion = result_to_text(ddg_result.get('FirstURL', None),
+                                        ddg_result.get('Text', None),
+                                        ddg_result.get('Result', None))
             if suggestion != heading:
                 results.append({'suggestion': suggestion})
         elif 'Topics' in ddg_result:
            suggestions = []
-            relatedTopics.append({ 'name' : ddg_result.get('Name', ''), 'suggestions': suggestions })
+            relatedTopics.append({'name': ddg_result.get('Name', ''),
+                                  'suggestions': suggestions})
             for topic_result in ddg_result.get('Topics', []):
-                suggestion = result_to_text(topic_result.get('FirstURL', None), topic_result.get('Text', None), topic_result.get('Result', None))
+                suggestion = result_to_text(topic_result.get('FirstURL', None),
+                                            topic_result.get('Text', None),
+                                            topic_result.get('Result', None))
                 if suggestion != heading:
                     suggestions.append(suggestion)
 
@@ -86,21 +95,26 @@ def response(resp):
     if abstractURL != '':
         # add as result ? problem always in english
         infobox_id = abstractURL
-        urls.append({'title': search_res.get('AbstractSource'), 'url': abstractURL})
+        urls.append({'title': search_res.get('AbstractSource'),
+                     'url': abstractURL})
 
     # definition
     definitionURL = search_res.get('DefinitionURL', '')
     if definitionURL != '':
         # add as result ? as answer ? problem always in english
         infobox_id = definitionURL
-        urls.append({'title': search_res.get('DefinitionSource'), 'url': definitionURL})
+        urls.append({'title': search_res.get('DefinitionSource'),
+                     'url': definitionURL})
 
     # entity
     entity = search_res.get('Entity', None)
-    # TODO continent / country / department / location / waterfall / mountain range : link to map search, get weather, near by locations
+    # TODO continent / country / department / location / waterfall /
+    # mountain range :
+    # link to map search, get weather, near by locations
     # TODO musician : link to music search
     # TODO concert tour : ??
-    # TODO film / actor / television / media franchise : links to IMDB / rottentomatoes (or scrap result)
+    # TODO film / actor / television / media franchise :
+    # links to IMDB / rottentomatoes (or scrap result)
     # TODO music : link tu musicbrainz / last.fm
     # TODO book : ??
    # TODO artist / playwright : ??
@@ -114,24 +128,25 @@ def response(resp):
     # TODO programming language : ??
     # TODO file format : ??
 
-    if len(heading)>0:
+    if len(heading) > 0:
         # TODO get infobox.meta.value where .label='article_title'
-        if image==None and len(attributes)==0 and len(urls)==1 and len(relatedTopics)==0 and len(content)==0:
+        if image is None and len(attributes) == 0 and len(urls) == 1 and\
+           len(relatedTopics) == 0 and len(content) == 0:
             results.append({
-                    'url': urls[0]['url'],
-                    'title': heading,
-                    'content': content
-                    })
+                           'url': urls[0]['url'],
+                           'title': heading,
+                           'content': content
+                           })
         else:
             results.append({
-                    'infobox': heading,
-                    'id': infobox_id,
-                    'entity': entity,
-                    'content': content,
-                    'img_src' : image,
-                    'attributes': attributes,
-                    'urls': urls,
-                    'relatedTopics': relatedTopics
-                    })
+                           'infobox': heading,
+                           'id': infobox_id,
+                           'entity': entity,
+                           'content': content,
+                           'img_src': image,
+                           'attributes': attributes,
+                           'urls': urls,
+                           'relatedTopics': relatedTopics
+                           })
 
     return results

@@ -1,5 +1,5 @@
 ## Dummy
-#
+#
 # @results empty array
 # @stable yes
 

@@ -1,8 +1,8 @@
 ## Faroo (Web, News)
-#
+#
 # @website http://www.faroo.com
 # @provide-api yes (http://www.faroo.com/hp/api/api.html), require API-key
-#
+#
 # @using-api yes
 # @results JSON
 # @stable yes
@@ -24,9 +24,10 @@ api_key = None
 url = 'http://www.faroo.com/'
 search_url = url + 'api?{query}&start={offset}&length={number_of_results}&l={language}&src={categorie}&i=false&f=json&key={api_key}'
 
-search_category = {'general': 'web',
+search_category = {'general': 'web',
                    'news': 'news'}
 
 
 # do search-request
 def request(query, params):
     offset = (params['pageno']-1) * number_of_results + 1
@@ -48,7 +49,7 @@ def request(query, params):
                                       query=urlencode({'q': query}),
                                       language=language,
                                       categorie=categorie,
-                                      api_key=api_key )
+                                      api_key=api_key)
 
     # using searx User-Agent
     params['headers']['User-Agent'] = searx_useragent()
@@ -101,7 +102,7 @@ def response(resp):
             results.append({'template': 'images.html',
                             'url': result['url'],
                             'title': result['title'],
-                            'content': result['kwic'],
+                            'content': result['kwic'],
                             'img_src': result['iurl']})
 
     # return results

@@ -1,8 +1,8 @@
 ## General Files (Files)
-#
+#
 # @website http://www.general-files.org
 # @provide-api no (nothing found)
-#
+#
 # @using-api no (because nothing found)
 # @results HTML (using search portal)
 # @stable no (HTML can change)

@@ -1,8 +1,8 @@
 ## Github (It)
-#
+#
 # @website https://github.com/
 # @provide-api yes (https://developer.github.com/v3/)
-#
+#
 # @using-api yes
 # @results JSON
 # @stable yes (using api)

@@ -1,15 +1,15 @@
-## Google (Web)
-#
+# Google (Web)
+#
 # @website https://www.google.com
 # @provide-api yes (https://developers.google.com/custom-search/)
-#
+#
 # @using-api no
 # @results HTML
 # @stable no (HTML can change)
 # @parse url, title, content, suggestion
 
 from urllib import urlencode
-from urlparse import unquote,urlparse,parse_qsl
+from urlparse import urlparse, parse_qsl
 from lxml import html
 from searx.engines.xpath import extract_text, extract_url
 
@@ -23,10 +23,13 @@ google_hostname = 'www.google.com'
 search_path = '/search'
 redirect_path = '/url'
 images_path = '/images'
-search_url = 'https://' + google_hostname + search_path + '?{query}&start={offset}&gbv=1'
+search_url = ('https://' +
+              google_hostname +
+              search_path +
+              '?{query}&start={offset}&gbv=1')
 
 # specific xpath variables
-results_xpath= '//li[@class="g"]'
+results_xpath = '//li[@class="g"]'
 url_xpath = './/h3/a/@href'
 title_xpath = './/h3'
 content_xpath = './/span[@class="st"]'
@@ -36,15 +39,18 @@ images_xpath = './/div/a'
 image_url_xpath = './@href'
 image_img_src_xpath = './img/@src'
 
 
 # remove google-specific tracking-url
 def parse_url(url_string):
     parsed_url = urlparse(url_string)
-    if parsed_url.netloc in [google_hostname, ''] and parsed_url.path==redirect_path:
+    if (parsed_url.netloc in [google_hostname, '']
+            and parsed_url.path == redirect_path):
         query = dict(parse_qsl(parsed_url.query))
         return query['q']
     else:
         return url_string
 
 
 # do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 10
@@ -52,7 +58,7 @@ def request(query, params):
     if params['language'] == 'all':
         language = 'en'
     else:
-        language = params['language'].replace('_','-').lower()
+        language = params['language'].replace('_', '-').lower()
 
     params['url'] = search_url.format(offset=offset,
                                       query=urlencode({'q': query}))
@@ -74,19 +80,24 @@ def response(resp):
         try:
             url = parse_url(extract_url(result.xpath(url_xpath), search_url))
             parsed_url = urlparse(url)
-            if parsed_url.netloc==google_hostname and parsed_url.path==search_path:
+            if (parsed_url.netloc == google_hostname
+                    and parsed_url.path == search_path):
                 # remove the link to google news
                 continue
 
-            if parsed_url.netloc==google_hostname and parsed_url.path==images_path:
-                # images result
-                results = results + parse_images(result)
+            # images result
+            if (parsed_url.netloc == google_hostname
+                    and parsed_url.path == images_path):
+                # only thumbnail image provided,
+                # so skipping image results
+                # results = results + parse_images(result)
+                pass
             else:
                 # normal result
                 content = extract_text(result.xpath(content_xpath)[0])
                 # append result
-                results.append({'url': url,
-                                'title': title,
+                results.append({'url': url,
+                                'title': title,
                                 'content': content})
         except:
             continue
@@ -99,12 +110,13 @@ def response(resp):
     # return results
     return results
 
 
 def parse_images(result):
     results = []
     for image in result.xpath(images_xpath):
         url = parse_url(extract_text(image.xpath(image_url_xpath)[0]))
         img_src = extract_text(image.xpath(image_img_src_xpath)[0])
 
 
         # append result
         results.append({'url': url,
                         'title': '',

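For context on the parse_url() hunks above: google result links sometimes point at the tracking endpoint /url?q=<real target>, and the helper unwraps them while passing every other URL through untouched. A self-contained sketch of that behaviour (Python 2, matching the codebase's imports; the sample URLs are invented):

    from urlparse import urlparse, parse_qsl  # Python 2 stdlib, as used in searx

    google_hostname = 'www.google.com'
    redirect_path = '/url'

    # remove google-specific tracking-url
    def parse_url(url_string):
        parsed_url = urlparse(url_string)
        if (parsed_url.netloc in [google_hostname, '']
                and parsed_url.path == redirect_path):
            query = dict(parse_qsl(parsed_url.query))
            return query['q']
        return url_string

    print parse_url('/url?q=https://example.org/&sa=U')  # -> https://example.org/
    print parse_url('https://example.org/direct')        # passes through unchanged
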
@@ -1,14 +1,15 @@
 ## Google (Images)
-#
+#
 # @website https://www.google.com
-# @provide-api yes (https://developers.google.com/web-search/docs/), deprecated!
-#
+# @provide-api yes (https://developers.google.com/web-search/docs/),
+# deprecated!
+#
 # @using-api yes
 # @results JSON
 # @stable yes (but deprecated)
 # @parse url, title, img_src
 
-from urllib import urlencode
+from urllib import urlencode,unquote
 from json import loads
 
 # engine dependent config
@@ -51,7 +52,7 @@ def response(resp):
         results.append({'url': href,
                         'title': title,
                         'content': '',
-                        'img_src': result['url'],
+                        'img_src': unquote(result['url']),
                         'template': 'images.html'})
 
     # return results

@@ -1,8 +1,9 @@
 ## Google (News)
-#
+#
 # @website https://www.google.com
-# @provide-api yes (https://developers.google.com/web-search/docs/), deprecated!
-#
+# @provide-api yes (https://developers.google.com/web-search/docs/),
+# deprecated!
+#
 # @using-api yes
 # @results JSON
 # @stable yes (but deprecated)

@@ -39,16 +39,16 @@ def response(resp):
         url = result_base_url.format(osm_type=osm_type,
                                      osm_id=r['osm_id'])
 
-        osm = {'type':osm_type,
-               'id':r['osm_id']}
+        osm = {'type': osm_type,
+               'id': r['osm_id']}
 
-        geojson = r.get('geojson')
+        geojson = r.get('geojson')
 
         # if no geojson is found and osm_type is a node, add geojson Point
         if not geojson and\
           osm_type == 'node':
-            geojson = {u'type':u'Point',
-                       u'coordinates':[r['lon'],r['lat']]}
+            geojson = {u'type': u'Point',
+                       u'coordinates': [r['lon'], r['lat']]}
 
         address_raw = r.get('address')
         address = {}
@@ -59,20 +59,20 @@ def response(resp):
            r['class'] == 'tourism' or\
            r['class'] == 'leisure':
             if address_raw.get('address29'):
-                address = {'name':address_raw.get('address29')}
+                address = {'name': address_raw.get('address29')}
             else:
-                address = {'name':address_raw.get(r['type'])}
+                address = {'name': address_raw.get(r['type'])}
 
             # add rest of adressdata, if something is already found
             if address.get('name'):
-                address.update({'house_number':address_raw.get('house_number'),
-                                'road':address_raw.get('road'),
-                                'locality':address_raw.get('city',
-                                           address_raw.get('town',
-                                           address_raw.get('village'))),
-                                'postcode':address_raw.get('postcode'),
-                                'country':address_raw.get('country'),
-                                'country_code':address_raw.get('country_code')})
+                address.update({'house_number': address_raw.get('house_number'),
+                                'road': address_raw.get('road'),
+                                'locality': address_raw.get('city',
+                                            address_raw.get('town',
+                                            address_raw.get('village'))),
+                                'postcode': address_raw.get('postcode'),
+                                'country': address_raw.get('country'),
+                                'country_code': address_raw.get('country_code')})
         else:
             address = None
 

@@ -1,8 +1,8 @@
 ## Piratebay (Videos, Music, Files)
-#
+#
 # @website https://thepiratebay.se
 # @provide-api no (nothing found)
-#
+#
 # @using-api no
 # @results HTML (using search portal)
 # @stable yes (HTML can change)
@@ -19,11 +19,11 @@ categories = ['videos', 'music', 'files']
 paging = True
 
 # search-url
-url = 'https://thepiratebay.se/'
+url = 'https://thepiratebay.cr/'
 search_url = url + 'search/{search_term}/{pageno}/99/{search_type}'
 
 # piratebay specific type-definitions
-search_types = {'files': '0',
+search_types = {'files': '0',
                 'music': '100',
                 'videos': '200'}
 

@@ -1,8 +1,8 @@
 ## Soundcloud (Music)
-#
+#
 # @website https://soundcloud.com
 # @provide-api yes (https://developers.soundcloud.com/)
-#
+#
 # @using-api yes
 # @results JSON
 # @stable yes

@@ -1,8 +1,8 @@
 ## Stackoverflow (It)
-#
+#
 # @website https://stackoverflow.com/
 # @provide-api not clear (https://api.stackexchange.com/docs/advanced-search)
-#
+#
 # @using-api no
 # @results HTML
 # @stable no (HTML can change)
@@ -50,8 +50,8 @@ def response(resp):
         content = escape(' '.join(result.xpath(content_xpath)))
 
         # append result
-        results.append({'url': href,
-                        'title': title,
+        results.append({'url': href,
+                        'title': title,
                         'content': content})
 
     # return results

@@ -1,8 +1,8 @@
 ## Twitter (Social media)
-#
+#
 # @website https://www.bing.com/news
 # @provide-api yes (https://dev.twitter.com/docs/using-search)
-#
+#
 # @using-api no
 # @results HTML (using search portal)
 # @stable no (HTML can change)

@@ -1,8 +1,9 @@
 ## Vimeo (Videos)
-#
+#
 # @website https://vimeo.com/
-# @provide-api yes (http://developer.vimeo.com/api), they have a maximum count of queries/hour
-#
+# @provide-api yes (http://developer.vimeo.com/api),
+# they have a maximum count of queries/hour
+#
 # @using-api no (TODO, rewrite to api)
 # @results HTML (using search portal)
 # @stable no (HTML can change)
@@ -35,11 +36,12 @@ publishedDate_xpath = './/p[@class="meta"]//attribute::datetime'
 
 # do search-request
 def request(query, params):
-    params['url'] = search_url.format(pageno=params['pageno'] ,
+    params['url'] = search_url.format(pageno=params['pageno'],
                                       query=urlencode({'q': query}))
 
     # TODO required?
-    params['cookies']['__utma'] = '00000000.000#0000000.0000000000.0000000000.0000000000.0'
+    params['cookies']['__utma'] =\
+        '00000000.000#0000000.0000000000.0000000000.0000000000.0'
 
     return params
 

@@ -2,13 +2,25 @@ import json
 from requests import get
 from urllib import urlencode
 
-resultCount=1
-urlSearch = 'https://www.wikidata.org/w/api.php?action=query&list=search&format=json&srnamespace=0&srprop=sectiontitle&{query}'
-urlDetail = 'https://www.wikidata.org/w/api.php?action=wbgetentities&format=json&props=labels%7Cinfo%7Csitelinks%7Csitelinks%2Furls%7Cdescriptions%7Cclaims&{query}'
-urlMap = 'https://www.openstreetmap.org/?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
+result_count = 1
+wikidata_host = 'https://www.wikidata.org'
+wikidata_api = wikidata_host + '/w/api.php'
+url_search = wikidata_api \
+    + '?action=query&list=search&format=json'\
+    + '&srnamespace=0&srprop=sectiontitle&{query}'
+url_detail = wikidata_api\
+    + '?action=wbgetentities&format=json'\
+    + '&props=labels%7Cinfo%7Csitelinks'\
+    + '%7Csitelinks%2Furls%7Cdescriptions%7Cclaims'\
+    + '&{query}'
+url_map = 'https://www.openstreetmap.org/'\
+    + '?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
 
 
 def request(query, params):
-    params['url'] = urlSearch.format(query=urlencode({'srsearch': query, 'srlimit': resultCount}))
+    params['url'] = url_search.format(
+        query=urlencode({'srsearch': query,
+                         'srlimit': result_count}))
     return params
 
 
@@ -23,7 +35,8 @@ def response(resp):
     language = resp.search_params['language'].split('_')[0]
     if language == 'all':
         language = 'en'
-    url = urlDetail.format(query=urlencode({'ids': '|'.join(wikidata_ids), 'languages': language + '|en'}))
+    url = url_detail.format(query=urlencode({'ids': '|'.join(wikidata_ids),
+                                             'languages': language + '|en'}))
 
     htmlresponse = get(url)
     jsonresponse = json.loads(htmlresponse.content)
@@ -32,6 +45,7 @@ def response(resp):
 
     return results
 
 
 def getDetail(jsonresponse, wikidata_id, language):
     results = []
     urls = []
@@ -40,60 +54,103 @@ def getDetail(jsonresponse, wikidata_id, language):
     result = jsonresponse.get('entities', {}).get(wikidata_id, {})
 
     title = result.get('labels', {}).get(language, {}).get('value', None)
-    if title == None:
+    if title is None:
         title = result.get('labels', {}).get('en', {}).get('value', None)
-    if title == None:
+    if title is None:
         return results
 
-    description = result.get('descriptions', {}).get(language, {}).get('value', None)
-    if description == None:
-        description = result.get('descriptions', {}).get('en', {}).get('value', '')
+    description = result\
+        .get('descriptions', {})\
+        .get(language, {})\
+        .get('value', None)
+
+    if description is None:
+        description = result\
+            .get('descriptions', {})\
+            .get('en', {})\
+            .get('value', '')
 
     claims = result.get('claims', {})
     official_website = get_string(claims, 'P856', None)
-    if official_website != None:
-        urls.append({ 'title' : 'Official site', 'url': official_website })
-        results.append({ 'title': title, 'url' : official_website })
+    if official_website is not None:
+        urls.append({'title': 'Official site', 'url': official_website})
+        results.append({'title': title, 'url': official_website})
 
     wikipedia_link_count = 0
     if language != 'en':
-        wikipedia_link_count += add_url(urls, 'Wikipedia (' + language + ')', get_wikilink(result, language + 'wiki'))
+        wikipedia_link_count += add_url(urls,
+                                        'Wikipedia (' + language + ')',
+                                        get_wikilink(result, language +
+                                                     'wiki'))
     wikipedia_en_link = get_wikilink(result, 'enwiki')
-    wikipedia_link_count += add_url(urls, 'Wikipedia (en)', wikipedia_en_link)
+    wikipedia_link_count += add_url(urls,
+                                    'Wikipedia (en)',
+                                    wikipedia_en_link)
    if wikipedia_link_count == 0:
         misc_language = get_wiki_firstlanguage(result, 'wiki')
-        if misc_language != None:
-            add_url(urls, 'Wikipedia (' + misc_language + ')', get_wikilink(result, misc_language + 'wiki'))
+        if misc_language is not None:
+            add_url(urls,
+                    'Wikipedia (' + misc_language + ')',
+                    get_wikilink(result, misc_language + 'wiki'))
 
     if language != 'en':
-        add_url(urls, 'Wiki voyage (' + language + ')', get_wikilink(result, language + 'wikivoyage'))
-    add_url(urls, 'Wiki voyage (en)', get_wikilink(result, 'enwikivoyage'))
+        add_url(urls,
+                'Wiki voyage (' + language + ')',
+                get_wikilink(result, language + 'wikivoyage'))
+
+    add_url(urls,
+            'Wiki voyage (en)',
+            get_wikilink(result, 'enwikivoyage'))
 
     if language != 'en':
-        add_url(urls, 'Wikiquote (' + language + ')', get_wikilink(result, language + 'wikiquote'))
-    add_url(urls, 'Wikiquote (en)', get_wikilink(result, 'enwikiquote'))
+        add_url(urls,
+                'Wikiquote (' + language + ')',
+                get_wikilink(result, language + 'wikiquote'))
+
+    add_url(urls,
+            'Wikiquote (en)',
+            get_wikilink(result, 'enwikiquote'))
 
-    add_url(urls, 'Commons wiki', get_wikilink(result, 'commonswiki'))
+    add_url(urls,
+            'Commons wiki',
+            get_wikilink(result, 'commonswiki'))
 
-    add_url(urls, 'Location', get_geolink(claims, 'P625', None))
+    add_url(urls,
+            'Location',
+            get_geolink(claims, 'P625', None))
 
-    add_url(urls, 'Wikidata', 'https://www.wikidata.org/wiki/' + wikidata_id + '?uselang='+ language)
+    add_url(urls,
+            'Wikidata',
+            'https://www.wikidata.org/wiki/'
+            + wikidata_id + '?uselang=' + language)
 
     musicbrainz_work_id = get_string(claims, 'P435')
-    if musicbrainz_work_id != None:
-        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/work/' + musicbrainz_work_id)
+    if musicbrainz_work_id is not None:
+        add_url(urls,
+                'MusicBrainz',
+                'http://musicbrainz.org/work/'
+                + musicbrainz_work_id)
 
     musicbrainz_artist_id = get_string(claims, 'P434')
-    if musicbrainz_artist_id != None:
-        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/artist/' + musicbrainz_artist_id)
+    if musicbrainz_artist_id is not None:
+        add_url(urls,
+                'MusicBrainz',
+                'http://musicbrainz.org/artist/'
+                + musicbrainz_artist_id)
 
     musicbrainz_release_group_id = get_string(claims, 'P436')
-    if musicbrainz_release_group_id != None:
-        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/release-group/' + musicbrainz_release_group_id)
+    if musicbrainz_release_group_id is not None:
+        add_url(urls,
+                'MusicBrainz',
+                'http://musicbrainz.org/release-group/'
+                + musicbrainz_release_group_id)
 
     musicbrainz_label_id = get_string(claims, 'P966')
-    if musicbrainz_label_id != None:
-        add_url(urls, 'MusicBrainz', 'http://musicbrainz.org/label/' + musicbrainz_label_id)
+    if musicbrainz_label_id is not None:
+        add_url(urls,
+                'MusicBrainz',
+                'http://musicbrainz.org/label/'
+                + musicbrainz_label_id)
 
     # musicbrainz_area_id = get_string(claims, 'P982')
     # P1407 MusicBrainz series ID
@@ -102,42 +159,43 @@ def getDetail(jsonresponse, wikidata_id, language):
     # P1407 MusicBrainz series ID
 
     postal_code = get_string(claims, 'P281', None)
-    if postal_code != None:
-        attributes.append({'label' : 'Postal code(s)', 'value' : postal_code})
+    if postal_code is not None:
+        attributes.append({'label': 'Postal code(s)', 'value': postal_code})
 
     date_of_birth = get_time(claims, 'P569', None)
-    if date_of_birth != None:
-        attributes.append({'label' : 'Date of birth', 'value' : date_of_birth})
+    if date_of_birth is not None:
+        attributes.append({'label': 'Date of birth', 'value': date_of_birth})
 
     date_of_death = get_time(claims, 'P570', None)
-    if date_of_death != None:
-        attributes.append({'label' : 'Date of death', 'value' : date_of_death})
+    if date_of_death is not None:
+        attributes.append({'label': 'Date of death', 'value': date_of_death})
 
-    if len(attributes)==0 and len(urls)==2 and len(description)==0:
+    if len(attributes) == 0 and len(urls) == 2 and len(description) == 0:
         results.append({
-                'url': urls[0]['url'],
-                'title': title,
-                'content': description
-                })
+                       'url': urls[0]['url'],
+                       'title': title,
+                       'content': description
+                       })
     else:
         results.append({
-                'infobox' : title,
-                'id' : wikipedia_en_link,
-                'content' : description,
-                'attributes' : attributes,
-                'urls' : urls
-                })
+                       'infobox': title,
+                       'id': wikipedia_en_link,
+                       'content': description,
+                       'attributes': attributes,
+                       'urls': urls
+                       })
 
     return results
 
 
 def add_url(urls, title, url):
-    if url != None:
-        urls.append({'title' : title, 'url' : url})
+    if url is not None:
+        urls.append({'title': title, 'url': url})
         return 1
     else:
         return 0
 
 
 def get_mainsnak(claims, propertyName):
     propValue = claims.get(propertyName, {})
     if len(propValue) == 0:
@@ -157,7 +215,7 @@ def get_string(claims, propertyName, defaultValue=None):
         mainsnak = e.get('mainsnak', {})
 
         datavalue = mainsnak.get('datavalue', {})
-        if datavalue != None:
+        if datavalue is not None:
             result.append(datavalue.get('value', ''))
 
     if len(result) == 0:
@@ -177,7 +235,7 @@ def get_time(claims, propertyName, defaultValue=None):
         mainsnak = e.get('mainsnak', {})
 
         datavalue = mainsnak.get('datavalue', {})
-        if datavalue != None:
+        if datavalue is not None:
             value = datavalue.get('value', '')
             result.append(value.get('time', ''))
 
@@ -190,7 +248,7 @@ def get_time(claims, propertyName, defaultValue=None):
 def get_geolink(claims, propertyName, defaultValue=''):
     mainsnak = get_mainsnak(claims, propertyName)
 
-    if mainsnak == None:
+    if mainsnak is None:
         return defaultValue
 
     datatype = mainsnak.get('datatype', '')
@@ -209,21 +267,25 @@ def get_geolink(claims, propertyName, defaultValue=''):
     # 1 --> 6
     # 0.016666666666667 --> 9
     # 0.00027777777777778 --> 19
-    # wolframalpha : quadratic fit { {13, 5}, {1, 6}, {0.0166666, 9}, {0.0002777777,19}}
+    # wolframalpha :
+    # quadratic fit { {13, 5}, {1, 6}, {0.0166666, 9}, {0.0002777777,19}}
     # 14.1186-8.8322 x+0.625447 x^2
     if precision < 0.0003:
         zoom = 19
     else:
         zoom = int(15 - precision*8.8322 + precision*precision*0.625447)
 
-    url = urlMap.replace('{latitude}', str(value.get('latitude',0))).replace('{longitude}', str(value.get('longitude',0))).replace('{zoom}', str(zoom))
+    url = url_map\
+        .replace('{latitude}', str(value.get('latitude', 0)))\
+        .replace('{longitude}', str(value.get('longitude', 0)))\
+        .replace('{zoom}', str(zoom))
 
     return url
 
 
 def get_wikilink(result, wikiid):
     url = result.get('sitelinks', {}).get(wikiid, {}).get('url', None)
-    if url == None:
+    if url is None:
         return url
     elif url.startswith('http://'):
         url = url.replace('http://', 'https://')
@@ -231,8 +293,9 @@ def get_wikilink(result, wikiid):
         url = 'https:' + url
     return url
 
 
 def get_wiki_firstlanguage(result, wikipatternid):
     for k in result.get('sitelinks', {}).keys():
-        if k.endswith(wikipatternid) and len(k)==(2+len(wikipatternid)):
+        if k.endswith(wikipatternid) and len(k) == (2+len(wikipatternid)):
             return k[0:2]
     return None

@@ -1,8 +1,9 @@
 ## Yacy (Web, Images, Videos, Music, Files)
-#
+#
 # @website http://yacy.net
-# @provide-api yes (http://www.yacy-websuche.de/wiki/index.php/Dev:APIyacysearch)
-#
+# @provide-api yes
+# (http://www.yacy-websuche.de/wiki/index.php/Dev:APIyacysearch)
+#
 # @using-api yes
 # @results JSON
 # @stable yes
@@ -16,7 +17,7 @@ from urllib import urlencode
 from dateutil import parser
 
 # engine dependent config
-categories = ['general', 'images'] #TODO , 'music', 'videos', 'files'
+categories = ['general', 'images']  # TODO , 'music', 'videos', 'files'
 paging = True
 language_support = True
 number_of_results = 5
@@ -28,7 +29,7 @@ search_url = '/yacysearch.json?{query}&startRecord={offset}&maximumRecords={limi
 # yacy specific type-definitions
 search_types = {'general': 'text',
                 'images': 'image',
-                'files': 'app',
+                'files': 'app',
                 'music': 'audio',
                 'videos': 'video'}
 

@@ -1,8 +1,9 @@
 ## Yahoo (Web)
-#
+#
 # @website https://search.yahoo.com/web
-# @provide-api yes (https://developer.yahoo.com/boss/search/), $0.80/1000 queries
-#
+# @provide-api yes (https://developer.yahoo.com/boss/search/),
+# $0.80/1000 queries
+#
 # @using-api no (because pricing)
 # @results HTML (using search portal)
 # @stable no (HTML can change)
@@ -40,8 +41,8 @@ def parse_url(url_string):
         if endpos > -1:
             endpositions.append(endpos)
 
-    if start==0 or len(endpositions) == 0:
-        return url_string
+    if start == 0 or len(endpositions) == 0:
+        return url_string
     else:
         end = min(endpositions)
         return unquote(url_string[start:end])
@@ -84,8 +85,8 @@ def response(resp):
         content = extract_text(result.xpath(content_xpath)[0])
 
         # append result
-        results.append({'url': url,
-                        'title': title,
+        results.append({'url': url,
+                        'title': title,
                         'content': content})
 
     # if no suggestion found, return results

@@ -1,8 +1,8 @@
 ## Youtube (Videos)
-#
+#
 # @website https://www.youtube.com/
 # @provide-api yes (http://gdata-samples-youtube-search-py.appspot.com/)
-#
+#
 # @using-api yes
 # @results JSON
 # @stable yes

@@ -15,7 +15,8 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.
 (C) 2013- by Adam Tauber, <asciimoo@gmail.com>
 '''
 
-import grequests
+import requests as requests_lib
+import threading
 import re
 from itertools import izip_longest, chain
 from datetime import datetime
@@ -32,6 +33,21 @@ from searx.query import Query
 number_of_searches = 0
 
 
+def threaded_requests(requests):
+    for fn, url, request_args in requests:
+        th = threading.Thread(
+            target=fn,
+            args=(url,),
+            kwargs=request_args,
+            name='search_request',
+        )
+        th.start()
+
+    for th in threading.enumerate():
+        if th.name == 'search_request':
+            th.join()
+
+
 # get default reqest parameter
 def default_request_params():
     return {
@@ -471,9 +487,9 @@ class Search(object):
 
             # specific type of request (GET or POST)
             if request_params['method'] == 'GET':
-                req = grequests.get
+                req = requests_lib.get
             else:
-                req = grequests.post
+                req = requests_lib.post
                 request_args['data'] = request_params['data']
 
             # ignoring empty urls
@@ -481,10 +497,10 @@ class Search(object):
                 continue
 
             # append request to list
-            requests.append(req(request_params['url'], **request_args))
+            requests.append((req, request_params['url'], request_args))
 
         # send all search-request
-        grequests.map(requests)
+        threaded_requests(requests)
 
         # update engine-specific stats
         for engine_name, engine_results in results.items():

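The hunks above drop the grequests dependency: each engine request now runs as a plain requests call inside a named thread, and threaded_requests() joins every thread named 'search_request' before results are aggregated. A rough standalone illustration of the same pattern (the URL and timeout here are invented for the example):

    import threading
    import requests

    def threaded_requests(requests_list):
        # fire off one named thread per pending engine request
        for fn, url, request_args in requests_list:
            th = threading.Thread(target=fn,
                                  args=(url,),
                                  kwargs=request_args,
                                  name='search_request')
            th.start()

        # block until every search thread has finished
        for th in threading.enumerate():
            if th.name == 'search_request':
                th.join()

    threaded_requests([(requests.get, 'https://example.org/', {'timeout': 2.0})])
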
@@ -1,7 +1,7 @@
 server:
     port : 8888
     secret_key : "ultrasecretkey" # change this!
-    debug : False # Debug mode, only for development
+    debug : True # Debug mode, only for development
     request_timeout : 2.0 # seconds
     base_url : False # Set custom base_url. Possible values: False or "https://your.custom.host/location/"
     themes_path : "" # Custom ui themes path
@@ -95,9 +95,9 @@ engines:
     engine : openstreetmap
     shortcut : osm
 
-  - name : piratebay
-    engine : piratebay
-    shortcut : tpb
+#  - name : piratebay
+#    engine : piratebay
+#    shortcut : tpb
 
   - name : soundcloud
     engine : soundcloud

@@ -0,0 +1 @@
+node_modules/

@@ -0,0 +1,17 @@
+install dependencies
+~~~~~~~~~~~~~~~~~~~~
+
+run this command in the directory ``searx/static/oscar``
+
+``npm install``
+
+compile sources
+~~~~~~~~~~~~~~~
+
+run this command in the directory ``searx/static/oscar``
+
+``grunt``
+
+or in the root directory:
+
+``make grunt``

@@ -0,0 +1,51 @@
+module.exports = function(grunt) {
+
+  grunt.initConfig({
+    pkg: grunt.file.readJSON('package.json'),
+    concat: {
+      options: {
+        separator: ';'
+      },
+      dist: {
+        src: ['js/searx_src/*.js'],
+        dest: 'js/searx.js'
+      }
+    },
+    uglify: {
+      options: {
+        banner: '/*! oscar/searx.min.js | <%= grunt.template.today("dd-mm-yyyy") %> | https://github.com/asciimoo/searx */\n'
+      },
+      dist: {
+        files: {
+          'js/searx.min.js': ['<%= concat.dist.dest %>']
+        }
+      }
+    },
+    jshint: {
+      files: ['gruntfile.js', 'js/searx_src/*.js'],
+      options: {
+        // options here to override JSHint defaults
+        globals: {
+          jQuery: true,
+          console: true,
+          module: true,
+          document: true
+        }
+      }
+    },
+    watch: {
+      files: ['<%= jshint.files %>'],
+      tasks: ['jshint']
+    }
+  });
+
+  grunt.loadNpmTasks('grunt-contrib-uglify');
+  grunt.loadNpmTasks('grunt-contrib-jshint');
+  grunt.loadNpmTasks('grunt-contrib-watch');
+  grunt.loadNpmTasks('grunt-contrib-concat');
+
+  grunt.registerTask('test', ['jshint']);
+
+  grunt.registerTask('default', ['jshint', 'concat', 'uglify']);
+
+};

@@ -0,0 +1,2 @@
+/*! oscar/searx.min.js | 30-11-2014 | https://github.com/asciimoo/searx */
+requirejs.config({baseUrl:"/static/oscar/js",paths:{app:"../app"}}),searx.autocompleter&&(searx.searchResults=new Bloodhound({datumTokenizer:Bloodhound.tokenizers.obj.whitespace("value"),queryTokenizer:Bloodhound.tokenizers.whitespace,remote:"/autocompleter?q=%QUERY"}),searx.searchResults.initialize()),$(document).ready(function(){searx.autocompleter&&$("#q").typeahead(null,{name:"search-results",displayKey:function(a){return a},source:searx.searchResults.ttAdapter()})}),$(document).ready(function(){$("#q.autofocus").focus(),$(".select-all-on-click").click(function(){$(this).select()}),$(".btn-collapse").click(function(){var a=$(this).data("btn-text-collapsed"),b=$(this).data("btn-text-not-collapsed");""!==a&&""!==b&&(new_html=$(this).hasClass("collapsed")?$(this).html().replace(a,b):$(this).html().replace(b,a),$(this).html(new_html))}),$(".btn-toggle .btn").click(function(){var a="btn-"+$(this).data("btn-class"),b=$(this).data("btn-label-default"),c=$(this).data("btn-label-toggled");""!==c&&(new_html=$(this).hasClass("btn-default")?$(this).html().replace(b,c):$(this).html().replace(c,b),$(this).html(new_html)),$(this).toggleClass(a),$(this).toggleClass("btn-default")})}),$(document).ready(function(){$(".searx_overpass_request").on("click",function(a){var b="https://overpass-api.de/api/interpreter?data=",c=b+"[out:json][timeout:25];(",d=");out meta;",e=$(this).data("osm-id"),f=$(this).data("osm-type"),g=$(this).data("result-table"),h="#"+$(this).data("result-table-loadicon"),i=["addr:city","addr:country","addr:housenumber","addr:postcode","addr:street"];if(e&&f&&g){g="#"+g;var j=null;switch(f){case"node":j=c+"node("+e+");"+d;break;case"way":j=c+"way("+e+");"+d;break;case"relation":j=c+"relation("+e+");"+d}if(j){$.ajax(j).done(function(a){if(a&&a.elements&&a.elements[0]){var b=a.elements[0],c=$(g).html();for(var d in b.tags)if(null===b.tags.name||-1==i.indexOf(d)){switch(c+="<tr><td>"+d+"</td><td>",d){case"phone":case"fax":c+='<a href="tel:'+b.tags[d].replace(/ /g,"")+'">'+b.tags[d]+"</a>";break;case"email":c+='<a href="mailto:'+b.tags[d]+'">'+b.tags[d]+"</a>";break;case"website":case"url":c+='<a href="'+b.tags[d]+'">'+b.tags[d]+"</a>";break;case"wikidata":c+='<a href="https://www.wikidata.org/wiki/'+b.tags[d]+'">'+b.tags[d]+"</a>";break;case"wikipedia":if(-1!=b.tags[d].indexOf(":")){c+='<a href="https://'+b.tags[d].substring(0,b.tags[d].indexOf(":"))+".wikipedia.org/wiki/"+b.tags[d].substring(b.tags[d].indexOf(":")+1)+'">'+b.tags[d]+"</a>";break}default:c+=b.tags[d]}c+="</td></tr>"}$(g).html(c),$(g).removeClass("hidden"),$(h).addClass("hidden")}}).fail(function(){$(h).html($(h).html()+'<p class="text-muted">could not load data!</p>')})}}$(this).off(a)}),$(".searx_init_map").on("click",function(a){var b=$(this).data("leaflet-target"),c=$(this).data("map-lon"),d=$(this).data("map-lat"),e=$(this).data("map-zoom"),f=$(this).data("map-boundingbox"),g=$(this).data("map-geojson");require(["leaflet-0.7.3.min"],function(){f&&(southWest=L.latLng(f[0],f[2]),northEast=L.latLng(f[1],f[3]),map_bounds=L.latLngBounds(southWest,northEast)),L.Icon.Default.imagePath="/static/oscar/img/map";{var a=L.map(b),h="https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png",i='Map data © <a href="https://openstreetmap.org">OpenStreetMap</a> contributors',j=new L.TileLayer(h,{minZoom:1,maxZoom:19,attribution:i}),k="http://otile{s}.mqcdn.com/tiles/1.0.0/map/{z}/{x}/{y}.jpg",l='Map data © <a href="https://openstreetmap.org">OpenStreetMap</a> contributors | Tiles Courtesy of <a href="http://www.mapquest.com/" target="_blank">MapQuest</a> <img src="http://developer.mapquest.com/content/osm/mq_logo.png">',m=new L.TileLayer(k,{minZoom:1,maxZoom:18,subdomains:"1234",attribution:l}),n="http://otile{s}.mqcdn.com/tiles/1.0.0/sat/{z}/{x}/{y}.jpg",o='Map data © <a href="https://openstreetmap.org">OpenStreetMap</a> contributors | Tiles Courtesy of <a href="http://www.mapquest.com/" target="_blank">MapQuest</a> <img src="https://developer.mapquest.com/content/osm/mq_logo.png"> | Portions Courtesy NASA/JPL-Caltech and U.S. Depart. of Agriculture, Farm Service Agency';new L.TileLayer(n,{minZoom:1,maxZoom:11,subdomains:"1234",attribution:o})}map_bounds?setTimeout(function(){a.fitBounds(map_bounds,{maxZoom:17})},0):c&&d&&(e?a.setView(new L.LatLng(d,c),e):a.setView(new L.LatLng(d,c),8)),a.addLayer(m);var p={"OSM Mapnik":j,MapQuest:m};L.control.layers(p).addTo(a),g&&L.geoJson(g).addTo(a)}),$(this).off(a)})});

@@ -0,0 +1,23 @@
+/**
+ * searx is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * searx is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with searx. If not, see < http://www.gnu.org/licenses/ >.
+ *
+ * (C) 2014 by Thomas Pointhuber, <thomas.pointhuber@gmx.at>
+ */
+
+requirejs.config({
+  baseUrl: '/static/oscar/js',
+  paths: {
+    app: '../app'
+  }
+});

@@ -0,0 +1,37 @@
+/**
+ * searx is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * searx is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with searx. If not, see < http://www.gnu.org/licenses/ >.
+ *
+ * (C) 2014 by Thomas Pointhuber, <thomas.pointhuber@gmx.at>
+ */
+
+if(searx.autocompleter) {
+    searx.searchResults = new Bloodhound({
+        datumTokenizer: Bloodhound.tokenizers.obj.whitespace('value'),
+        queryTokenizer: Bloodhound.tokenizers.whitespace,
+        remote: '/autocompleter?q=%QUERY'
+    });
+    searx.searchResults.initialize();
+}
+
+$(document).ready(function(){
+    if(searx.autocompleter) {
+        $('#q').typeahead(null, {
+            name: 'search-results',
+            displayKey: function(result) {
+                return result;
+            },
+            source: searx.searchResults.ttAdapter()
+        });
+    }
+});

@@ -0,0 +1,66 @@
+/**
+ * searx is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * searx is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with searx. If not, see < http://www.gnu.org/licenses/ >.
+ *
+ * (C) 2014 by Thomas Pointhuber, <thomas.pointhuber@gmx.at>
+ */
+
+$(document).ready(function(){
+    /**
+     * focus element if class="autofocus" and id="q"
+     */
+    $('#q.autofocus').focus();
+
+    /**
+     * select full content on click if class="select-all-on-click"
+     */
+    $(".select-all-on-click").click(function () {
+        $(this).select();
+    });
+
+    /**
+     * change text during btn-collapse click if possible
+     */
+    $('.btn-collapse').click(function() {
+        var btnTextCollapsed = $(this).data('btn-text-collapsed');
+        var btnTextNotCollapsed = $(this).data('btn-text-not-collapsed');
+
+        if(btnTextCollapsed !== '' && btnTextNotCollapsed !== '') {
+            if($(this).hasClass('collapsed')) {
+                new_html = $(this).html().replace(btnTextCollapsed, btnTextNotCollapsed);
+            } else {
+                new_html = $(this).html().replace(btnTextNotCollapsed, btnTextCollapsed);
+            }
+            $(this).html(new_html);
+        }
+    });
+
+    /**
+     * change text during btn-toggle click if possible
+     */
+    $('.btn-toggle .btn').click(function() {
+        var btnClass = 'btn-' + $(this).data('btn-class');
+        var btnLabelDefault = $(this).data('btn-label-default');
+        var btnLabelToggled = $(this).data('btn-label-toggled');
+        if(btnLabelToggled !== '') {
+            if($(this).hasClass('btn-default')) {
+                new_html = $(this).html().replace(btnLabelDefault, btnLabelToggled);
+            } else {
+                new_html = $(this).html().replace(btnLabelToggled, btnLabelDefault);
+            }
+            $(this).html(new_html);
+        }
+        $(this).toggleClass(btnClass);
+        $(this).toggleClass('btn-default');
+    });
+});

@ -0,0 +1,172 @@
|
|||
/**
|
||||
* searx is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU Affero General Public License as published by
|
||||
* the Free Software Foundation, either version 3 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* searx is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU Affero General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with searx. If not, see < http://www.gnu.org/licenses/ >.
|
||||
*
|
||||
* (C) 2014 by Thomas Pointhuber, <thomas.pointhuber@gmx.at>
|
||||
*/
|
||||
|
||||
$(document).ready(function(){
    $(".searx_overpass_request").on( "click", function( event ) {
        var overpass_url = "https://overpass-api.de/api/interpreter?data=";
        var query_start = overpass_url + "[out:json][timeout:25];(";
        var query_end = ");out meta;";

        var osm_id = $(this).data('osm-id');
        var osm_type = $(this).data('osm-type');
        var result_table = $(this).data('result-table');
        var result_table_loadicon = "#" + $(this).data('result-table-loadicon');

        // tags which can be ignored
        var osm_ignore_tags = [ "addr:city", "addr:country", "addr:housenumber", "addr:postcode", "addr:street" ];

        if(osm_id && osm_type && result_table) {
            result_table = "#" + result_table;
            var query = null;
            switch(osm_type) {
                case 'node':
                    query = query_start + "node(" + osm_id + ");" + query_end;
                    break;
                case 'way':
                    query = query_start + "way(" + osm_id + ");" + query_end;
                    break;
                case 'relation':
                    query = query_start + "relation(" + osm_id + ");" + query_end;
                    break;
                default:
                    break;
            }

            if(query) {
                //alert(query);
                var ajaxRequest = $.ajax( query )
                    .done(function( html) {
                        if(html && html.elements && html.elements[0]) {
                            var element = html.elements[0];
                            var newHtml = $(result_table).html();
                            for (var row in element.tags) {
                                if(element.tags.name === null || osm_ignore_tags.indexOf(row) == -1) {
                                    newHtml += "<tr><td>" + row + "</td><td>";
                                    switch(row) {
                                        case "phone":
                                        case "fax":
                                            newHtml += "<a href=\"tel:" + element.tags[row].replace(/ /g,'') + "\">" + element.tags[row] + "</a>";
                                            break;
                                        case "email":
                                            newHtml += "<a href=\"mailto:" + element.tags[row] + "\">" + element.tags[row] + "</a>";
                                            break;
                                        case "website":
                                        case "url":
                                            newHtml += "<a href=\"" + element.tags[row] + "\">" + element.tags[row] + "</a>";
                                            break;
                                        case "wikidata":
                                            newHtml += "<a href=\"https://www.wikidata.org/wiki/" + element.tags[row] + "\">" + element.tags[row] + "</a>";
                                            break;
                                        case "wikipedia":
                                            if(element.tags[row].indexOf(":") != -1) {
                                                newHtml += "<a href=\"https://" + element.tags[row].substring(0,element.tags[row].indexOf(":")) + ".wikipedia.org/wiki/" + element.tags[row].substring(element.tags[row].indexOf(":")+1) + "\">" + element.tags[row] + "</a>";
                                                break;
                                            }
                                        /* jshint ignore:start */
                                        default:
                                        /* jshint ignore:end */
                                            newHtml += element.tags[row];
                                            break;
                                    }
                                    newHtml += "</td></tr>";
                                }
                            }
                            $(result_table).html(newHtml);
                            $(result_table).removeClass('hidden');
                            $(result_table_loadicon).addClass('hidden');
                        }
                    })
                    .fail(function() {
                        $(result_table_loadicon).html($(result_table_loadicon).html() + "<p class=\"text-muted\">could not load data!</p>");
                    });
            }
        }

        // this event occurs only once per element
        $( this ).off( event );
    });

$(".searx_init_map").on( "click", function( event ) {
|
||||
var leaflet_target = $(this).data('leaflet-target');
|
||||
var map_lon = $(this).data('map-lon');
|
||||
var map_lat = $(this).data('map-lat');
|
||||
var map_zoom = $(this).data('map-zoom');
|
||||
var map_boundingbox = $(this).data('map-boundingbox');
|
||||
var map_geojson = $(this).data('map-geojson');
|
||||
|
||||
require(['leaflet-0.7.3.min'], function(leaflet) {
|
||||
if(map_boundingbox) {
|
||||
southWest = L.latLng(map_boundingbox[0], map_boundingbox[2]);
|
||||
northEast = L.latLng(map_boundingbox[1], map_boundingbox[3]);
|
||||
map_bounds = L.latLngBounds(southWest, northEast);
|
||||
}
|
||||
|
||||
// TODO hack
|
||||
// change default imagePath
|
||||
L.Icon.Default.imagePath = "/static/oscar/img/map";
|
||||
|
||||
// init map
|
||||
var map = L.map(leaflet_target);
|
||||
|
||||
// create the tile layer with correct attribution
|
||||
var osmMapnikUrl='https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png';
|
||||
var osmMapnikAttrib='Map data © <a href="https://openstreetmap.org">OpenStreetMap</a> contributors';
|
||||
var osmMapnik = new L.TileLayer(osmMapnikUrl, {minZoom: 1, maxZoom: 19, attribution: osmMapnikAttrib});
|
||||
|
||||
var osmMapquestUrl='http://otile{s}.mqcdn.com/tiles/1.0.0/map/{z}/{x}/{y}.jpg';
|
||||
var osmMapquestAttrib='Map data © <a href="https://openstreetmap.org">OpenStreetMap</a> contributors | Tiles Courtesy of <a href="http://www.mapquest.com/" target="_blank">MapQuest</a> <img src="http://developer.mapquest.com/content/osm/mq_logo.png">';
|
||||
var osmMapquest = new L.TileLayer(osmMapquestUrl, {minZoom: 1, maxZoom: 18, subdomains: '1234', attribution: osmMapquestAttrib});
|
||||
|
||||
var osmMapquestOpenAerialUrl='http://otile{s}.mqcdn.com/tiles/1.0.0/sat/{z}/{x}/{y}.jpg';
|
||||
var osmMapquestOpenAerialAttrib='Map data © <a href="https://openstreetmap.org">OpenStreetMap</a> contributors | Tiles Courtesy of <a href="http://www.mapquest.com/" target="_blank">MapQuest</a> <img src="https://developer.mapquest.com/content/osm/mq_logo.png"> | Portions Courtesy NASA/JPL-Caltech and U.S. Depart. of Agriculture, Farm Service Agency';
|
||||
var osmMapquestOpenAerial = new L.TileLayer(osmMapquestOpenAerialUrl, {minZoom: 1, maxZoom: 11, subdomains: '1234', attribution: osmMapquestOpenAerialAttrib});
|
||||
|
||||
// init map view
|
||||
if(map_bounds) {
|
||||
// TODO hack: https://github.com/Leaflet/Leaflet/issues/2021
|
||||
setTimeout(function () {
|
||||
map.fitBounds(map_bounds, {
|
||||
maxZoom:17
|
||||
});
|
||||
}, 0);
|
||||
} else if (map_lon && map_lat) {
|
||||
if(map_zoom)
|
||||
map.setView(new L.LatLng(map_lat, map_lon),map_zoom);
|
||||
else
|
||||
map.setView(new L.LatLng(map_lat, map_lon),8);
|
||||
}
|
||||
|
||||
map.addLayer(osmMapquest);
|
||||
|
||||
var baseLayers = {
|
||||
"OSM Mapnik": osmMapnik,
|
||||
"MapQuest": osmMapquest/*,
|
||||
"MapQuest Open Aerial": osmMapquestOpenAerial*/
|
||||
};
|
||||
|
||||
L.control.layers(baseLayers).addTo(map);
|
||||
|
||||
|
||||
if(map_geojson)
|
||||
L.geoJson(map_geojson).addTo(map);
|
||||
/*else if(map_bounds)
|
||||
L.rectangle(map_bounds, {color: "#ff7800", weight: 3, fill:false}).addTo(map);*/
|
||||
});
|
||||
|
||||
// this event occour only once per element
|
||||
$( this ).off( event );
|
||||
});
|
||||
});
|
|
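
Note: for reference outside the browser, the Overpass call assembled by the searx_overpass_request handler above can be reproduced in a few lines of Python, a minimal sketch using the requests library (the HTTP client this release migrates to). The node id is a made-up placeholder; real ids come from the element's data-osm-id attribute:

    # Sketch of the Overpass interpreter call built by the click handler above.
    # The node id 123456 is a placeholder; searx fills in a real OSM id.
    import requests

    query = "[out:json][timeout:25];(node(123456););out meta;"
    data = requests.get("https://overpass-api.de/api/interpreter",
                        params={"data": query}).json()

    # The .done() callback walks the same structure: elements[0].tags maps
    # tag names ("phone", "website", "wikipedia", ...) to display values.
    elements = data.get("elements") or [{}]
    for tag, value in elements[0].get("tags", {}).items():
        print(tag, "=", value)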
@@ -0,0 +1,15 @@
{
  "devDependencies": {
    "grunt": "~0.4.5",
    "grunt-contrib-uglify": "~0.6.0",
    "grunt-contrib-watch" : "~0.6.1",
    "grunt-contrib-concat" : "~0.5.0",
    "grunt-contrib-jshint" : "~0.10.0"
  },

  "scripts": {
    "build": "npm install && grunt",
    "start": "grunt watch",
    "test": "grunt"
  }
}
@@ -19,14 +19,14 @@

<p>Searx is a <a href="https://en.wikipedia.org/wiki/Metasearch_engine">metasearch engine</a>,
inspired by the <a href="http://seeks-project.info/">seeks project</a>.<br />
It provides basic privacy by mixing your queries with searches on other platforms without storing search data. Queries are made using a POST request on every browser (except chrome*). Therefore they show up in neither our logs, nor your url history. In case of Chrome* users there is an exception, Searx uses the search bar to perform GET requests.<br />
It provides basic privacy by mixing your queries with searches on other platforms without storing search data. Queries are made using a POST request on every browser (except Chrome*). Therefore they show up in neither our logs nor your URL history. There is one exception for Chrome* users: if Searx is used from the search bar, it performs GET requests.<br />
Searx can be added to your browser's search bar; moreover, it can be set as the default search engine.
</p>

<h2>How can I make it my own?</h2>

<p>Searx appreciates your concern regarding logs, so take the <a href="https://github.com/asciimoo/searx">code</a> and run it yourself! <br />Add your Searx to this <a href="https://github.com/asciimoo/searx/wiki/Searx-instances">list</a> to help other people reclaim their privacy and make the Internet freer!
<br />The more decentralized the Internet, is the more freedom we have!</p>
<br />The more decentralized the Internet is, the more freedom we have!</p>


<h2>More about searx</h2>
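
Note: the POST/GET distinction in the paragraph above is what keeps query terms out of the URL. A rough illustration in Python (the instance URL is hypothetical):

    import requests

    # POST: the query travels in the request body, so it appears in neither
    # the URL bar/history nor typical access logs.
    requests.post("https://searx.example.com/", data={"q": "privacy"})

    # GET (what a browser search bar triggers): the query becomes part of
    # the URL, e.g. https://searx.example.com/?q=privacy
    requests.get("https://searx.example.com/", params={"q": "privacy"})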
@@ -73,6 +73,6 @@
    <script src="{{ url_for('static', filename='js/bootstrap.min.js') }}"></script>
    {% if autocomplete %}<script src="{{ url_for('static', filename='js/typeahead.bundle.min.js') }}"></script>{% endif %}
    <script src="{{ url_for('static', filename='js/require-2.1.15.min.js') }}"></script>
    <script src="{{ url_for('static', filename='js/scripts.js') }}"></script>
    <script src="{{ url_for('static', filename='js/searx.min.js') }}"></script>
</body>
</html>
@@ -3,7 +3,7 @@
        <h4 class="panel-title">{{ infobox.infobox }}</h4>
    </div>
    <div class="panel-body">
        {% if infobox.img_src %}<img class="img-responsive center-block infobox_part" src="{{ infobox.img_src }}" />{% endif %}
        {% if infobox.img_src %}<img class="img-responsive center-block infobox_part" src="{{ infobox.img_src }}" alt="{{ infobox.infobox }}" />{% endif %}
        {% if infobox.content %}<p class="infobox_part">{{ infobox.content }}</p>{% endif %}

        {% if infobox.attributes %}
@@ -1,9 +1,9 @@
{% from 'oscar/macros.html' import icon %}

<h4 class="result_header">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" /> {% endif %}<a href="{{ result.url }}">{{ result.title|safe }}</a></h4>
<h4 class="result_header">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" alt="{{ result['favicon'] }}" /> {% endif %}<a href="{{ result.url }}">{{ result.title|safe }}</a></h4>

{% if result.publishedDate %}<time class="text-muted" datetime="{{ result.publishedDate }}" pubdate>{{ result.publishedDate }}</time>{% endif %}
<small><a class="text-info" href="https://web.archive.org/web/{{ result.pretty_url }}">{{ icon('link') }} {{ _('cached') }}</a></small>
{% if result.publishedDate %}<time class="text-muted" datetime="{{ result.pubdate }}" >{{ result.publishedDate }}</time>{% endif %}
<small><a class="text-info" href="https://web.archive.org/web/{{ result.url }}">{{ icon('link') }} {{ _('cached') }}</a></small>

{% if result.content %}<p class="result-content">{{ result.content|safe }}</p>{% endif %}
@@ -1,5 +1,5 @@
<a href="{{ result.img_src }}" data-toggle="modal" data-target="#modal-{{ index }}">
    <img src="{{ result.img_src }}" alt="{{ result.title|e }}" class="img-thumbnail">
    <img src="{{ result.img_src }}" alt="{{ result.title|striptags }}" title="{{ result.title|striptags }}" class="img-thumbnail">
</a>

<div class="modal fade" id="modal-{{ index }}" tabindex="-1" role="dialog" aria-hidden="true">
@@ -7,7 +7,7 @@
        <div class="modal-content">
            <div class="modal-header">
                <button type="button" class="close" data-dismiss="modal"><span aria-hidden="true">×</span><span class="sr-only">Close</span></button>
                <h4 class="modal-title">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" /> {% endif %}{{ result.title|striptags }}</h4>
                <h4 class="modal-title">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" alt="{{ result['favicon'] }}" /> {% endif %}{{ result.title|striptags }}</h4>
            </div>
            <div class="modal-body">
                <img class="img-responsive center-block" src="{{ result.img_src }}" alt="{{ result.title }}">
@@ -1,8 +1,8 @@
{% from 'oscar/macros.html' import icon %}

<h4 class="result_header">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" /> {% endif %}<a href="{{ result.url }}">{{ result.title|safe }}</a></h4>
<h4 class="result_header">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" alt="{{ result['favicon'] }}" /> {% endif %}<a href="{{ result.url }}">{{ result.title|safe }}</a></h4>

{% if result.publishedDate %}<time class="text-muted" datetime="{{ result.publishedDate }}" pubdate>{{ result.publishedDate }}</time>{% endif %}
{% if result.publishedDate %}<time class="text-muted" datetime="{{ result.pubdate }}" >{{ result.publishedDate }}</time>{% endif %}

<small><a class="text-info" href="https://web.archive.org/web/{{ result.pretty_url }}">{{ icon('link') }} {{ _('cached') }}</a></small>
@@ -1,9 +1,9 @@
{% from 'oscar/macros.html' import icon %}

<h4 class="result_header">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" /> {% endif %}<a href="{{ result.url }}">{{ result.title|safe }}</a></h4>
<h4 class="result_header">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" alt="{{ result['favicon'] }}" /> {% endif %}<a href="{{ result.url }}">{{ result.title|safe }}</a></h4>

{% if result.publishedDate %}<time class="text-muted" datetime="{{ result.publishedDate }}" pubdate>{{ result.publishedDate }}</time>{% endif %}
<small><a class="text-info" href="https://web.archive.org/web/{{ result.pretty_url }}">{{ icon('link') }} {{ _('cached') }}</a></small>
{% if result.publishedDate %}<time class="text-muted" datetime="{{ result.pubdate }}" >{{ result.publishedDate }}</time>{% endif %}
<small><a class="text-info" href="https://web.archive.org/web/{{ result.url }}">{{ icon('link') }} {{ _('cached') }}</a></small>

<p class="result-content">{{ icon('transfer') }} {{ _('Seeder') }} <span class="badge">{{ result.seed }}</span>, {{ _('Leecher') }} <span class="badge">{{ result.leech }}</span>
<br/>
@@ -1,13 +1,13 @@
{% from 'oscar/macros.html' import icon %}

<h4 class="result_header">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" /> {% endif %}<a href="{{ result.url }}">{{ result.title|safe }}</a></h4>
<h4 class="result_header">{% if result['favicon'] %}<img width="32" height="32" class="favicon" src="static/{{ theme }}/img/icons/{{ result['favicon'] }}.png" alt="{{ result['favicon'] }}" /> {% endif %}<a href="{{ result.url }}">{{ result.title|safe }}</a></h4>

{% if result.publishedDate %}<time class="text-muted" datetime="{{ result.publishedDate }}" pubdate>{{ result.publishedDate }}</time>{% endif %}
<small><a class="text-info" href="https://web.archive.org/web/{{ result.pretty_url }}">{{ icon('link') }} {{ _('cached') }}</a></small>
{% if result.publishedDate %}<time class="text-muted" datetime="{{ result.pubdate }}" >{{ result.publishedDate }}</time>{% endif %}
<small><a class="text-info" href="https://web.archive.org/web/{{ result.url }}">{{ icon('link') }} {{ _('cached') }}</a></small>

<div class="container-fluid">
    <div class="row">
        <img class="thumbnail col-xs-6 col-sm-4 col-md-4 result-content" src="{{ result.thumbnail|safe }}" />
        <img class="thumbnail col-xs-6 col-sm-4 col-md-4 result-content" src="{{ result.thumbnail|safe }}" alt="{{ result.title|urlencode }} {{ result['favicon'] }}" />
        {% if result.content %}<p class="col-xs-12 col-sm-8 col-md-8 result-content">{{ result.content|safe }}</p>{% endif %}
    </div>
</div>
@@ -51,6 +51,11 @@
        </div><!-- /#main_results -->

        <div class="col-sm-4" id="sidebar_results">
            {% if infoboxes %}
                {% for infobox in infoboxes %}
                    {% include 'oscar/infobox.html' %}
                {% endfor %}
            {% endif %}

            {% if suggestions %}
            <div class="panel panel-default">
@@ -76,7 +81,7 @@
            <form role="form">
                <div class="form-group">
                    <label for="search_url">{{ _('Search URL') }}</label>
                    <input type="url" class="form-control select-all-on-click cursor-text" name="search_url" value="{{ base_url }}?q={{ q|urlencode }}&pageno={{ pageno }}{% if selected_categories %}&category_{{ selected_categories|join("&category_") }}{% endif %}" readonly>
                    <input id="search_url" type="url" class="form-control select-all-on-click cursor-text" name="search_url" value="{{ base_url }}?q={{ q|urlencode }}&pageno={{ pageno }}{% if selected_categories %}&category_{{ selected_categories|join("&category_")|replace(' ','+') }}{% endif %}" readonly>
                </div>
            </form>
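
Note: the added |replace(' ','+') matters because category names can contain spaces, which join() copies into the URL verbatim. A quick sketch of the difference (the category names are examples):

    # 'social media' stands in for any category name containing a space.
    categories = ['general', 'social media']

    joined = '&category_' + '&category_'.join(categories)
    print(joined)                    # ...&category_social media  (space breaks the query string)
    print(joined.replace(' ', '+'))  # ...&category_social+media  (valid query string)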
@@ -94,13 +99,6 @@
                <div class="clearfix"></div>
            </div>
        </div>

        {% if infoboxes %}
            {% for infobox in infoboxes %}
                {% include 'oscar/infobox.html' %}
            {% endfor %}
        {% endif %}

        </div><!-- /#sidebar_results -->
    </div>
{% endblock %}
@@ -2,7 +2,7 @@

<form method="{{ method or 'POST' }}" action="{{ url_for('index') }}" id="search_form" role="search">
    <div class="input-group col-md-8 col-md-offset-2">
        <input type="search" name="q" class="form-control input-lg" id="q" placeholder="{{ _('Search for...') }}" autocomplete="off" value="{{ q }}">
        <input type="search" name="q" class="form-control input-lg autofocus" id="q" placeholder="{{ _('Search for...') }}" autocomplete="off" value="{{ q }}">
        <span class="input-group-btn">
            <button type="submit" class="btn btn-default input-lg">{{ icon('search') }}<span class="sr-only">{{ _('Start search') }}</span></button>
        </span>
@@ -18,7 +18,9 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.

# version of searx
VERSION_MAJOR = 0
VERSION_MINOR = 4
VERSION_MINOR = 5
VERSION_BUILD = 0

VERSION_STRING = "%d.%d.%d" % (VERSION_MAJOR,VERSION_MINOR,VERSION_BUILD)
VERSION_STRING = "{0}.{1}.{2}".format(VERSION_MAJOR,
                                      VERSION_MINOR,
                                      VERSION_BUILD)
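
Note: both formatting styles produce the same version string; the switch to str.format() is purely stylistic. A one-line check:

    VERSION_MAJOR, VERSION_MINOR, VERSION_BUILD = 0, 5, 0

    assert ("%d.%d.%d" % (VERSION_MAJOR, VERSION_MINOR, VERSION_BUILD)
            == "{0}.{1}.{2}".format(VERSION_MAJOR, VERSION_MINOR, VERSION_BUILD)
            == "0.5.0")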
@@ -17,10 +17,6 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.
(C) 2013- by Adam Tauber, <asciimoo@gmail.com>
'''

from gevent import monkey
monkey.patch_all()


if __name__ == '__main__':
    from sys import path
    from os.path import realpath, dirname
@@ -298,10 +294,9 @@ def index():

        # TODO, check if timezone is calculated right
        if 'publishedDate' in result:
            if result['publishedDate'].replace(tzinfo=None)\
                    >= datetime.now() - timedelta(days=1):
                timedifference = datetime.now() - result['publishedDate']\
                    .replace(tzinfo=None)
            result['pubdate'] = result['publishedDate'].strftime('%Y-%m-%d %H:%M:%S%z')
            if result['publishedDate'].replace(tzinfo=None) >= datetime.now() - timedelta(days=1):
                timedifference = datetime.now() - result['publishedDate'].replace(tzinfo=None)
                minutes = int((timedifference.seconds / 60) % 60)
                hours = int(timedifference.seconds / 60 / 60)
                if hours == 0:
@@ -309,8 +304,6 @@ def index():
                else:
                    result['publishedDate'] = gettext(u'{hours} hour(s), {minutes} minute(s) ago').format(hours=hours, minutes=minutes)  # noqa
            else:
                result['pubdate'] = result['publishedDate']\
                    .strftime('%a, %d %b %Y %H:%M:%S %z')
                result['publishedDate'] = format_date(result['publishedDate'])

    if search.request_data.get('format') == 'json':
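
Note: after this change each dated result carries two fields: pubdate, a machine-readable strftime value used for the <time datetime="..."> attribute in the templates above, and publishedDate, the human-readable text. A standalone sketch of the branching (plain strftime stands in for Babel's locale-aware format_date):

    from datetime import datetime, timedelta

    published = datetime.now() - timedelta(hours=3)  # example timestamp

    if published >= datetime.now() - timedelta(days=1):
        # results from the last 24 hours: exact stamp plus a relative label
        pubdate = published.strftime('%Y-%m-%d %H:%M:%S%z')
        diff = datetime.now() - published
        hours = int(diff.seconds / 60 / 60)
        minutes = int((diff.seconds / 60) % 60)
        label = '{0} hour(s), {1} minute(s) ago'.format(hours, minutes)
    else:
        # older results: RFC-style stamp plus an absolute date
        pubdate = published.strftime('%a, %d %b %Y %H:%M:%S %z')
        label = published.strftime('%Y-%m-%d')  # stand-in for format_date()

    print(pubdate, '->', label)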
@@ -409,7 +402,7 @@ def autocompleter():

    # return autocompleter results
    if request_data.get('format') == 'x-suggestions':
        return Response(json.dumps([query, results]),
        return Response(json.dumps([query.query, results]),
                        mimetype='application/json')
    else:
        return Response(json.dumps(results),
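
Note: the x-suggestions branch emits the OpenSearch suggestions format, a two-element JSON array of the typed prefix and the completion list; the fix serializes the raw query text (query.query) instead of the Query object, which json.dumps cannot handle. Sketch with made-up completions:

    import json

    query_text = 'pari'            # what the user has typed so far
    results = ['paris', 'parity']  # example completions

    # OpenSearch suggestions format: [prefix, [suggestion, ...]]
    print(json.dumps([query_text, results]))
    # ["pari", ["paris", "parity"]]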
@@ -41,7 +41,7 @@ setup(
    install_requires=[
        'flask',
        'flask-babel',
        'grequests',
        'requests',
        'lxml',
        'pyyaml',
        'setuptools',
@@ -36,18 +36,6 @@ zc.recipe.testrunner = 2.0.0
# WebTest==2.0.11
beautifulsoup4 = 4.3.2

# Required by:
# grequests==0.2.0
gevent = 1.0

# Required by:
# gevent==1.0
greenlet = 0.4.2

# Required by:
# searx==0.1
grequests = 0.2.0

# Required by:
# robotframework-httplibrary==0.4.2
jsonpatch = 1.3