update version 0.8

commit a1618d9d20
parent f03a6c556f
@@ -1,16 +1,37 @@
+general:
+    debug : False # Debug mode, only for development
+
+search:
+    safe_search : 0 # Filter results. 0: None, 1: Moderate, 2: Strict
+    autocomplete : "duckduckgo" # Existing autocomplete backends: "dbpedia", "duckduckgo", "google", "startpage", "wikipedia" - leave blank to turn it off by default
+
 server:
     port : 8888
+    bind_address : "127.0.0.1" # address to listen on
     secret_key : "ultrasecretkey" # change this!
-    debug : False # Debug mode, only for development
-    request_timeout : 2.0 # seconds
     base_url : ynhbaseurl # Set custom base_url. Possible values: False or "https://your.custom.host/location/"
+    image_proxy : False # Proxying image results through searx
+
+ui:
     themes_path : "" # Custom ui themes path - leave it blank if you didn't change
     default_theme : oscar # ui theme
-    https_rewrite : True # Force rewrite result urls. See searx/https_rewrite.py
-    useragent_suffix : "" # suffix of searx_useragent, could contain informations like an email address to the administrator
-    image_proxy : False # Proxying image results through searx
     default_locale : "" # Default interface locale - leave blank to detect from browser information or use codes from the 'locales' config section
+
+outgoing: # communication with search engines
+    request_timeout : 2.0 # seconds
+    useragent_suffix : "" # suffix of searx_useragent, could contain informations like an email address to the administrator
+    # uncomment below section if you want to use a proxy
+    # see http://docs.python-requests.org/en/latest/user/advanced/#proxies
+    # SOCKS proxies are not supported : see https://github.com/kennethreitz/requests/pull/478
+    # proxies :
+    #     http : http://127.0.0.1:8080
+    #     https: http://127.0.0.1:8080
+    # uncomment below section only if you have more than one network interface
+    # which can be the source of outgoing search requests
+    # source_ips:
+    #     - 1.1.1.1
+    #     - 1.1.1.2
 
 engines:
   - name : wikipedia
     engine : mediawiki
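Note on the hunk above: 0.8 splits the old flat server: block into general:, search:, server:, ui: and outgoing: sections, so options like debug and request_timeout move rather than disappear. A minimal sketch of reading the relocated keys (key paths taken from this commit's hunks; PyYAML assumed):

    # Minimal sketch, assuming PyYAML: where relocated options now live.
    import yaml

    with open('settings.yml') as f:
        settings = yaml.safe_load(f)

    debug = settings.get('general', {}).get('debug')               # was server.debug
    timeout = settings.get('outgoing', {}).get('request_timeout')  # was server.request_timeout
    backend = settings.get('search', {}).get('autocomplete')       # new in 0.8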
@@ -20,17 +41,14 @@ engines:
 
   - name : bing
     engine : bing
-    locale : en-US
     shortcut : bi
 
   - name : bing images
     engine : bing_images
-    locale : en-US
     shortcut : bii
 
   - name : bing news
     engine : bing_news
-    locale : en-US
     shortcut : bin
 
   - name : btdigg
@@ -73,12 +91,6 @@ engines:
 #    shortcut : fa
 #    api_key : 'apikey' # required!
 
-# down - website is under criminal investigation by the UK
-#  - name : filecrop
-#    engine : filecrop
-#    categories : files
-#    shortcut : fc
-
   - name : 500px
     engine : www500px
     shortcut : px
@@ -98,9 +110,9 @@ engines:
 # Or you can use the html non-stable engine, activated by default
     engine : flickr_noapi
 
-  - name : general-file
-    engine : generalfile
-    shortcut : gf
+  - name : gigablast
+    engine : gigablast
+    shortcut : gb
     disabled: True
 
   - name : github
@@ -164,6 +176,28 @@ engines:
   - name : piratebay
     engine : piratebay
     shortcut : tpb
+    disabled : True
 
+  - name : qwant
+    engine : qwant
+    shortcut : qw
+    categories : general
+    disabled : True
+
+  - name : qwant images
+    engine : qwant
+    shortcut : qwi
+    categories : images
+
+  - name : qwant news
+    engine : qwant
+    shortcut : qwn
+    categories : news
+
+  - name : qwant social
+    engine : qwant
+    shortcut : qws
+    categories : social media
+
   - name : kickass
     engine : kickass
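Each shortcut above also works as a bang: prefixing a query with the shortcut (e.g. !qw searx) should route the search to that engine alone, so the new Qwant entries stay reachable even while disabled : True keeps them out of the default engine set.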
@@ -186,6 +220,10 @@ engines:
     shortcut : scc
     disabled : True
 
+  - name : spotify
+    engine : spotify
+    shortcut : stf
+
   - name : subtitleseeker
     engine : subtitleseeker
     shortcut : ss
@@ -196,12 +234,21 @@ engines:
   - name : startpage
     engine : startpage
     shortcut : sp
+    timeout : 6.0
+    disabled : True
 
-# +30% page load time
-#  - name : ixquick
-#    engine : startpage
-#    base_url : 'https://www.ixquick.com/'
-#    search_url : 'https://www.ixquick.com/do/search'
+  - name : ixquick
+    engine : startpage
+    base_url : 'https://www.ixquick.com/'
+    search_url : 'https://www.ixquick.com/do/search'
+    shortcut : iq
+    timeout : 6.0
+    disabled : True
+
+  - name : swisscows
+    engine : swisscows
+    shortcut : sw
+    disabled : True
 
   - name : twitter
     engine : twitter
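The timeout : 6.0 added to startpage and ixquick is a per-engine override of the global outgoing.request_timeout (2.0 seconds in the first hunk); presumably this is why the old "+30% page load time" warning could be dropped in favor of an explicit, longer limit plus disabled : True.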
@@ -232,8 +279,13 @@ engines:
     shortcut : yhn
 
   - name : youtube
-    engine : youtube
     shortcut : yt
+    # You can use the engine using the official stable API, but you need an API key
+    # See : https://console.developers.google.com/project
+    # engine : youtube_api
+    # api_key: 'apikey' # required!
+    # Or you can use the html non-stable engine, activated by default
+    engine : youtube_noapi
 
   - name : dailymotion
     engine : dailymotion
@@ -241,9 +293,14 @@ engines:
 
   - name : vimeo
     engine : vimeo
-    locale : en-US
     shortcut : vm
 
+#The blekko technology and team have joined IBM Watson! -> https://blekko.com/
+#  - name : blekko images
+#    engine : blekko_images
+#    locale : en-US
+#    shortcut : bli
+
 #  - name : yacy
 #    engine : yacy
 #    shortcut : ya
@@ -254,6 +311,7 @@ engines:
 locales:
     en : English
     de : Deutsch
+    he : Hebrew
     hu : Magyar
     fr : Français
     es : Español
@@ -261,4 +319,7 @@ locales:
     nl : Nederlands
     ja : 日本語 (Japanese)
     tr : Türkçe
+    pt : Português
     ru : Russian
+    ro : Romanian
+    zh : 中文 (Chinese)
@@ -1,11 +1,11 @@
-Searx was created and is maintained by Adam Tauber.
+Searx was created by Adam Tauber and is maintained by Adam Tauber and Alexandre Flament.
 
 Major contributing authors:
 
 - Adam Tauber <asciimoo@gmail.com> `@asciimoo <https://github.com/asciimoo>`_
 - Matej Cotman
 - Thomas Pointhuber
-- Alexandre Flament
+- Alexandre Flament `@dalf <https://github.com/dalf>`_
 - @Cqoicebordel
 
 People who have submitted patches/translates, reported bugs, consulted features or
@@ -34,3 +34,7 @@ generally made searx better:
 - @opi
 - @dimqua
 - Giorgos Logiotatidis
+- Luc Didry
+- Niklas Haas
+- @underr
+- Emmanuel Benazera
@@ -1,3 +1,49 @@
+0.8.0 2015.09.08
+================
+
+- New engines
+
+  - Blekko (image)
+  - Gigablast (general)
+  - Spotify (music)
+  - Swisscows (general, images)
+  - Qwant (general, images, news, social media)
+- Plugin system
+- New plugins
+
+  - HTTPS rewrite
+  - Search on category select
+  - User information
+  - Tracker url part remover
+- Multiple outgoing IP and HTTP/HTTPS proxy support
+- New autocompleter: startpage
+- New theme: pix-art
+- Settings file structure change
+- Fabfile, docker deployment
+- Optional safesearch result filter
+- Force HTTPS in engines if possible
+- Disabled HTTP referrer on outgoing links
+- Display cookie information
+- Prettier search URLs
+- Right-to-left text handling in themes
+- Translation updates (New locales: Chinese, Hebrew, Portuguese, Romanian)
+
+
+New dependencies
+~~~~~~~~~~~~~~~~
+
+- pyopenssl
+- ndg-httpsclient
+- pyasn1
+- pyasn1-modules
+- certifi
+
+
+News
+~~~~
+
+@dalf joined the maintainer "team"
+
 0.7.0 2015.02.03
 ================
 
@@ -1,21 +1,22 @@
-FROM debian:stable
+FROM python:2.7-slim
 
-RUN apt-get update && \
-    apt-get install -y --no-install-recommends \
-    python-dev python2.7-minimal python-virtualenv \
-    python-pybabel python-pip zlib1g-dev \
-    libxml2-dev libxslt1-dev build-essential \
-    openssl
+WORKDIR /app
 
 RUN useradd searx
 
-WORKDIR /app
-RUN pip install uwsgi
+EXPOSE 5000
+CMD ["/usr/local/bin/uwsgi", "--uid", "searx", "--gid", "searx", "--http", ":5000", "-w", "searx.webapp"]
+
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    zlib1g-dev libxml2-dev libxslt1-dev libffi-dev build-essential \
+    libssl-dev openssl && \
+    rm -rf /var/lib/apt/lists/*
+
+RUN pip install --no-cache uwsgi
 
 COPY requirements.txt /app/requirements.txt
-RUN pip install -r requirements.txt
+RUN pip install --no-cache -r requirements.txt
 
 COPY . /app
 RUN sed -i -e "s/ultrasecretkey/`openssl rand -hex 16`/g" searx/settings.yml
-
-EXPOSE 5000
-CMD ["/usr/local/bin/uwsgi", "--uid", "searx", "--gid", "searx", "--http", ":5000", "-w", "searx.webapp"]
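The rewritten Dockerfile swaps the hand-rolled Debian Python stack for the python:2.7-slim base image and removes the apt lists to shrink the image; COPY . /app stays last, so code-only changes invalidate just the final layers. Usage should be unchanged: something like docker build -t searx . followed by docker run -p 5000:5000 searx (the image tag here is only an assumption).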
sources/LICENSE | 655
@@ -1,14 +1,661 @@
-searx is free software: you can redistribute it and/or modify
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+  When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate. Many developers of free software are heartened and
+encouraged by the resulting cooperation. However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+  The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community. It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server. Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+  An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals. This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU Affero General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7. This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy. This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged. This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source. This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge. You need not require recipients to copy the
+    Corresponding Source along with the object code. If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source. Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+  Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+  Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+  9. Acceptance Not Required for Having Copies.
+
+  You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+  10. Automatic Licensing of Downstream Recipients.
+
+  Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+  An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+  You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+  11. Patents.
+
+  A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+  A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+  Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+  In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+  If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+  If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+  A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+  Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+  12. No Surrender of Others' Freedom.
+
+  If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+  13. Remote Network Interaction; Use with the GNU General Public License.
+
+  Notwithstanding any other provision of this License, if you modify the
+Program, your modified version must prominently offer all users
+interacting with it remotely through a computer network (if your version
+supports such interaction) an opportunity to receive the Corresponding
+Source of your version by providing access to the Corresponding Source
+from a network server at no charge, through some standard or customary
+means of facilitating copying of software. This Corresponding Source
+shall include the Corresponding Source for any work covered by version 3
+of the GNU General Public License that is incorporated pursuant to the
+following paragraph.
+
+  Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the work with which it is combined will remain governed by version
+3 of the GNU General Public License.
+
+  14. Revised Versions of this License.
+
+  The Free Software Foundation may publish revised and/or new versions of
+the GNU Affero General Public License from time to time. Such new versions
+will be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU Affero General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU Affero General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+  If the Program specifies that a proxy can decide which future
+versions of the GNU Affero General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+  Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+  15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+<one line to give the program's name and a brief idea of what it does.>
+Copyright (C) <year> <name of author>
+
+This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License as published by
 the Free Software Foundation, either version 3 of the License, or
 (at your option) any later version.
 
-searx is distributed in the hope that it will be useful,
+This program is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 GNU Affero General Public License for more details.
 
 You should have received a copy of the GNU Affero General Public License
-along with searx. If not, see < http://www.gnu.org/licenses/ >.
+along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-(C) 2013- by Adam Tauber, <asciimoo@gmail.com>
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source. For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code. There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
@@ -51,6 +51,7 @@ styles:
 	@lessc -x searx/static/themes/courgette/less/style-rtl.less > searx/static/themes/courgette/css/style-rtl.css
 	@lessc -x searx/static/less/bootstrap/bootstrap.less > searx/static/css/bootstrap.min.css
 	@lessc -x searx/static/themes/oscar/less/oscar/oscar.less > searx/static/themes/oscar/css/oscar.min.css
+	@lessc -x searx/static/themes/pix-art/less/style.less > searx/static/themes/pix-art/css/style.css
 
 grunt:
 	@grunt --gruntfile searx/static/themes/oscar/gruntfile.js
@@ -5,3 +5,8 @@ lxml
 pyyaml
 pygments
 python-dateutil
+ndg-httpsclient
+pyopenssl
+pyasn1
+pyasn1-modules
+certifi
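These five packages match the changelog's "New dependencies" list; together they give requests modern TLS (SNI and a current CA bundle) on Python 2.7. A sketch of the mechanism, assuming it mirrors the requests[security] extras:

    # Sketch (assumption: mirrors requests' "security" extras on Python 2.7):
    # injecting pyOpenSSL into urllib3 yields SNI-capable HTTPS connections,
    # and certifi supplies an up-to-date CA bundle.
    import certifi
    import urllib3.contrib.pyopenssl

    urllib3.contrib.pyopenssl.inject_into_urllib3()
    print(certifi.where())  # path of the bundled CA certificate file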
@@ -36,26 +36,15 @@ if 'SEARX_SETTINGS_PATH' in environ:
 else:
     settings_path = join(searx_dir, 'settings.yml')
 
-if 'SEARX_HTTPS_REWRITE_PATH' in environ:
-    https_rewrite_path = environ['SEARX_HTTPS_REWRITE_PATH']
-else:
-    https_rewrite_path = join(searx_dir, 'https_rules')
-
 # load settings
 with open(settings_path) as settings_yaml:
     settings = load(settings_yaml)
 
-if settings.get('server', {}).get('debug'):
+if settings.get('general', {}).get('debug'):
     logging.basicConfig(level=logging.DEBUG)
 else:
     logging.basicConfig(level=logging.WARNING)
 
 logger = logging.getLogger('searx')
 
-# load https rules only if https rewrite is enabled
-if settings.get('server', {}).get('https_rewrite'):
-    # loade https rules
-    from searx.https_rewrite import load_https_rules
-    load_https_rules(https_rewrite_path)
-
 logger.info('Initialisation done')
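Two effects of this hunk: debug is now read from the general: section (matching the settings.yml restructuring above), and the https_rewrite bootstrap disappears from startup because HTTPS rewriting became a plugin in 0.8, per the changelog. The SEARX_SETTINGS_PATH hook is untouched; a hypothetical launcher using it:

    # Hypothetical launcher: SEARX_SETTINGS_PATH is consulted at import
    # time, as the context lines above show. The path is an assumption.
    import os
    os.environ['SEARX_SETTINGS_PATH'] = '/etc/searx/settings.yml'

    from searx import settings  # runs the loading code shown above
    print(settings['general']['debug'])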
@@ -19,11 +19,19 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.
 from lxml import etree
 from json import loads
 from urllib import urlencode
+from searx import settings
 from searx.languages import language_codes
 from searx.engines import (
     categories, engines, engine_shortcuts
 )
-from searx.poolrequests import get
+from searx.poolrequests import get as http_get
+
+
+def get(*args, **kwargs):
+    if 'timeout' not in kwargs:
+        kwargs['timeout'] = settings['outgoing']['request_timeout']
+
+    return http_get(*args, **kwargs)


 def searx_bang(full_query):
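The new get() wrapper gives every autocomplete backend the configured outgoing timeout unless the caller passes one explicitly. A minimal sketch of the effect, with a stand-in for searx.poolrequests.get:

settings = {'outgoing': {'request_timeout': 2.0}}

def http_get(*args, **kwargs):
    # stand-in for searx.poolrequests.get; just reports the timeout used
    return kwargs.get('timeout')

def get(*args, **kwargs):
    if 'timeout' not in kwargs:
        kwargs['timeout'] = settings['outgoing']['request_timeout']
    return http_get(*args, **kwargs)

print(get('https://example.org'))              # 2.0, default applied
print(get('https://example.org', timeout=5))   # 5, the caller's value wins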
@@ -103,8 +111,8 @@ def searx_bang(full_query):


 def dbpedia(query):
-    # dbpedia autocompleter
-    autocomplete_url = 'http://lookup.dbpedia.org/api/search.asmx/KeywordSearch?'  # noqa
+    # dbpedia autocompleter, no HTTPS
+    autocomplete_url = 'http://lookup.dbpedia.org/api/search.asmx/KeywordSearch?'

     response = get(autocomplete_url
                    + urlencode(dict(QueryString=query)))
@@ -131,7 +139,7 @@ def duckduckgo(query):

 def google(query):
     # google autocompleter
-    autocomplete_url = 'http://suggestqueries.google.com/complete/search?client=toolbar&'  # noqa
+    autocomplete_url = 'https://suggestqueries.google.com/complete/search?client=toolbar&'

     response = get(autocomplete_url
                    + urlencode(dict(q=query)))
@@ -145,9 +153,19 @@ def google(query):
     return results


+def startpage(query):
+    # startpage autocompleter
+    url = 'https://startpage.com/do/suggest?{query}'
+
+    resp = get(url.format(query=urlencode({'query': query}))).text.split('\n')
+    if len(resp) > 1:
+        return resp
+    return []
+
+
 def wikipedia(query):
     # wikipedia autocompleter
-    url = 'https://en.wikipedia.org/w/api.php?action=opensearch&{0}&limit=10&namespace=0&format=json'  # noqa
+    url = 'https://en.wikipedia.org/w/api.php?action=opensearch&{0}&limit=10&namespace=0&format=json'

     resp = loads(get(url.format(urlencode(dict(search=query)))).text)
     if len(resp) > 1:
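The Startpage suggest endpoint appears to answer with plain text, one suggestion per line, which is why the backend only splits the body on newlines. Roughly, with an illustrative response body:

body = 'searx\nsearx engine\nsearx github'   # illustrative; the real format may differ
resp = body.split('\n')
if len(resp) > 1:
    suggestions = resp   # ['searx', 'searx engine', 'searx github']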
@@ -158,5 +176,6 @@ def wikipedia(query):
 backends = {'dbpedia': dbpedia,
             'duckduckgo': duckduckgo,
             'google': google,
+            'startpage': startpage,
             'wikipedia': wikipedia
             }
(File diff suppressed because it is too large.)
@@ -71,8 +71,11 @@ def load_engine(engine_data):
     if not hasattr(engine, 'language_support'):
         engine.language_support = True

+    if not hasattr(engine, 'safesearch'):
+        engine.safesearch = False
+
     if not hasattr(engine, 'timeout'):
-        engine.timeout = settings['server']['request_timeout']
+        engine.timeout = settings['outgoing']['request_timeout']

     if not hasattr(engine, 'shortcut'):
         engine.shortcut = ''
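Engine modules only declare what they override; load_engine backfills the rest, and the default timeout is now read from the outgoing section instead of server. A compressed sketch of the defaulting pattern, with illustrative values:

class engine(object):
    # stand-in for a loaded engine module that declares only a shortcut
    shortcut = 'bi'

settings = {'outgoing': {'request_timeout': 2.0}}

if not hasattr(engine, 'safesearch'):
    engine.safesearch = False
if not hasattr(engine, 'timeout'):
    engine.timeout = settings['outgoing']['request_timeout']

print(engine.safesearch, engine.timeout)   # False 2.0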
@@ -1,15 +1,17 @@
-## Bing (Web)
-#
-# @website     https://www.bing.com
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
-#              max. 5000 query/month
-#
-# @using-api   no (because of query limit)
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content
-#
-# @todo        publishedDate
+"""
+ Bing (Web)
+
+ @website     https://www.bing.com
+ @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+              max. 5000 query/month
+
+ @using-api   no (because of query limit)
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content
+
+ @todo        publishedDate
+"""

 from urllib import urlencode
 from cgi import escape
@@ -50,7 +52,7 @@ def request(query, params):
 def response(resp):
     results = []

-    dom = html.fromstring(resp.content)
+    dom = html.fromstring(resp.text)

     # parse results
     for result in dom.xpath('//div[@class="sa_cc"]'):
@@ -1,17 +1,19 @@
-## Bing (Images)
-#
-# @website     https://www.bing.com/images
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
-#              max. 5000 query/month
-#
-# @using-api   no (because of query limit)
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, img_src
-#
-# @todo        currently there are up to 35 images received per page,
-#              because bing does not parse count=10.
-#              limited response to 10 images
+"""
+ Bing (Images)
+
+ @website     https://www.bing.com/images
+ @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+              max. 5000 query/month
+
+ @using-api   no (because of query limit)
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, img_src
+
+ @todo        currently there are up to 35 images received per page,
+              because bing does not parse count=10.
+              limited response to 10 images
+"""

 from urllib import urlencode
 from lxml import html
@@ -26,7 +28,7 @@ safesearch = True
 # search-url
 base_url = 'https://www.bing.com/'
 search_string = 'images/search?{query}&count=10&first={offset}'
-thumb_url = "http://ts1.mm.bing.net/th?id={ihk}"
+thumb_url = "https://www.bing.com/th?id={ihk}"

 # safesearch definitions
 safesearch_types = {2: 'STRICT',
@@ -61,7 +63,7 @@ def request(query, params):
 def response(resp):
     results = []

-    dom = html.fromstring(resp.content)
+    dom = html.fromstring(resp.text)

     # init regex for yaml-parsing
     p = re.compile('({|,)([a-z]+):(")')
@@ -1,21 +1,22 @@
-## Bing (News)
-#
-# @website     https://www.bing.com/news
-# @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
-#              max. 5000 query/month
-#
-# @using-api   no (because of query limit)
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content, publishedDate
+"""
+ Bing (News)
+
+ @website     https://www.bing.com/news
+ @provide-api yes (http://datamarket.azure.com/dataset/bing/search),
+              max. 5000 query/month
+
+ @using-api   no (because of query limit)
+ @results     RSS (using search portal)
+ @stable      yes (except perhaps for the images)
+ @parse       url, title, content, publishedDate, thumbnail
+"""

 from urllib import urlencode
-from cgi import escape
-from lxml import html
-from datetime import datetime, timedelta
+from urlparse import urlparse, parse_qsl
+from datetime import datetime
 from dateutil import parser
-import re
-from searx.engines.xpath import extract_text
+from lxml import etree
+from searx.utils import list_get

 # engine dependent config
 categories = ['news']
@@ -24,7 +25,25 @@ language_support = True

 # search-url
 base_url = 'https://www.bing.com/'
-search_string = 'news/search?{query}&first={offset}'
+search_string = 'news/search?{query}&first={offset}&format=RSS'
+
+
+# remove click
+def url_cleanup(url_string):
+    parsed_url = urlparse(url_string)
+    if parsed_url.netloc == 'www.bing.com' and parsed_url.path == '/news/apiclick.aspx':
+        query = dict(parse_qsl(parsed_url.query))
+        return query.get('url', None)
+    return url_string
+
+
+# replace the http://*bing4.com/th?id=... by https://www.bing.com/th?id=...
+def image_url_cleanup(url_string):
+    parsed_url = urlparse(url_string)
+    if parsed_url.netloc.endswith('bing4.com') and parsed_url.path == '/th':
+        query = dict(parse_qsl(parsed_url.query))
+        return "https://www.bing.com/th?id=" + query.get('id')
+    return url_string


 # do search-request
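Bing's RSS feed wraps every result link in a click-tracking redirect; url_cleanup unwraps it by pulling the original target out of the query string, and image_url_cleanup normalises the sharded *.bing4.com thumbnail hosts. A quick check of the behaviour, with illustrative URLs:

print(url_cleanup('https://www.bing.com/news/apiclick.aspx?'
                  'ref=FexRss&url=http%3A%2F%2Fexample.org%2Fstory'))
# -> http://example.org/story (parse_qsl already decodes the percent-encoding)

print(image_url_cleanup('http://www.bing4.com/th?id=ON.123&pid=News'))
# -> https://www.bing.com/th?id=ON.123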
@@ -40,8 +59,6 @@ def request(query, params):
                                      query=urlencode({'q': query, 'setmkt': language}),
                                      offset=offset)

-    params['cookies']['_FP'] = "ui=en-US"
-
     params['url'] = base_url + search_path

     return params
@@ -51,44 +68,40 @@ def request(query, params):
 def response(resp):
     results = []

-    dom = html.fromstring(resp.content)
+    rss = etree.fromstring(resp.text)
+
+    ns = rss.nsmap

     # parse results
-    for result in dom.xpath('//div[@class="sn_r"]'):
-        link = result.xpath('.//div[@class="newstitle"]/a')[0]
-        url = link.attrib.get('href')
-        title = extract_text(link)
-        contentXPath = result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]')
-        content = escape(extract_text(contentXPath))
-
-        # parse publishedDate
-        publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div'
-                                          '//span[contains(@class,"sn_ST")]'
-                                          '//span[contains(@class,"sn_tm")]')
-
-        publishedDate = escape(extract_text(publishedDateXPath))
-
-        if re.match("^[0-9]+ minute(s|) ago$", publishedDate):
-            timeNumbers = re.findall(r'\d+', publishedDate)
-            publishedDate = datetime.now() - timedelta(minutes=int(timeNumbers[0]))
-        elif re.match("^[0-9]+ hour(s|) ago$", publishedDate):
-            timeNumbers = re.findall(r'\d+', publishedDate)
-            publishedDate = datetime.now() - timedelta(hours=int(timeNumbers[0]))
-        elif re.match("^[0-9]+ hour(s|), [0-9]+ minute(s|) ago$", publishedDate):
-            timeNumbers = re.findall(r'\d+', publishedDate)
-            publishedDate = datetime.now()\
-                - timedelta(hours=int(timeNumbers[0]))\
-                - timedelta(minutes=int(timeNumbers[1]))
-        elif re.match("^[0-9]+ day(s|) ago$", publishedDate):
-            timeNumbers = re.findall(r'\d+', publishedDate)
-            publishedDate = datetime.now() - timedelta(days=int(timeNumbers[0]))
-        else:
-            try:
-                publishedDate = parser.parse(publishedDate, dayfirst=False)
-            except TypeError:
-                publishedDate = datetime.now()
+    for item in rss.xpath('./channel/item'):
+        # url / title / content
+        url = url_cleanup(item.xpath('./link/text()')[0])
+        title = list_get(item.xpath('./title/text()'), 0, url)
+        content = list_get(item.xpath('./description/text()'), 0, '')
+
+        # publishedDate
+        publishedDate = list_get(item.xpath('./pubDate/text()'), 0)
+        try:
+            publishedDate = parser.parse(publishedDate, dayfirst=False)
+        except TypeError:
+            publishedDate = datetime.now()
+        except ValueError:
+            publishedDate = datetime.now()
+
+        # thumbnail
+        thumbnail = list_get(item.xpath('./News:Image/text()', namespaces=ns), 0)
+        if thumbnail is not None:
+            thumbnail = image_url_cleanup(thumbnail)

         # append result
-        results.append({'url': url,
-                        'title': title,
-                        'publishedDate': publishedDate,
+        if thumbnail is not None:
+            results.append({'template': 'videos.html',
+                            'url': url,
+                            'title': title,
+                            'publishedDate': publishedDate,
+                            'content': content,
+                            'thumbnail': thumbnail})
+        else:
+            results.append({'url': url,
+                            'title': title,
+                            'publishedDate': publishedDate,
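list_get, newly imported from searx.utils, is a bounds-safe list lookup: the element at the index if present, otherwise the given default. A sketch of its contract as used above (the real implementation may differ in detail):

def list_get(a_list, index, default=None):
    # return a_list[index] when it exists, else the default
    if len(a_list) > index:
        return a_list[index]
    return default

list_get(['BBC News'], 0, 'fallback')   # -> 'BBC News'
list_get([], 0, 'fallback')             # -> 'fallback'
list_get([], 0)                         # -> None, so the pubDate parse above lands in except TypeError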
@@ -1,12 +1,14 @@
-## Blekko (Images)
-#
-# @website     https://blekko.com
-# @provide-api yes (inofficial)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, img_src
+"""
+ Blekko (Images)
+
+ @website     https://blekko.com
+ @provide-api yes (inofficial)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, img_src
+"""

 from json import loads
 from urllib import urlencode
@@ -1,12 +1,14 @@
-## BTDigg (Videos, Music, Files)
-#
-# @website     https://btdigg.org
-# @provide-api yes (on demand)
-#
-# @using-api   no
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content, seed, leech, magnetlink
+"""
+ BTDigg (Videos, Music, Files)
+
+ @website     https://btdigg.org
+ @provide-api yes (on demand)
+
+ @using-api   no
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content, seed, leech, magnetlink
+"""

 from urlparse import urljoin
 from cgi import escape
@@ -1,21 +1,49 @@
 from datetime import datetime
 import re
+import os
+import json
+import unicodedata
+

 categories = []
-url = 'http://finance.yahoo.com/d/quotes.csv?e=.csv&f=sl1d1t1&s={query}=X'
+url = 'https://download.finance.yahoo.com/d/quotes.csv?e=.csv&f=sl1d1t1&s={query}=X'
 weight = 100

-parser_re = re.compile(r'^\W*(\d+(?:\.\d+)?)\W*([a-z]{3})\W*(?:in)?\W*([a-z]{3})\W*$', re.I)  # noqa
+parser_re = re.compile(u'^\W*(\d+(?:\.\d+)?)\W*([^.0-9].+)\W+in?\W+([^\.]+)\W*$', re.I)  # noqa
+
+db = 1
+
+
+def normalize_name(name):
+    name = name.lower().replace('-', ' ')
+    name = re.sub(' +', ' ', name)
+    return unicodedata.normalize('NFKD', name).lower()
+
+
+def name_to_iso4217(name):
+    global db
+
+    name = normalize_name(name)
+    currencies = db['names'].get(name, [name])
+    return currencies[0]
+
+
+def iso4217_to_name(iso4217, language):
+    global db
+
+    return db['iso4217'].get(iso4217, {}).get(language, iso4217)


 def request(query, params):
-    m = parser_re.match(query)
+    m = parser_re.match(unicode(query, 'utf8'))
     if not m:
         # wrong query
         return params

     ammount, from_currency, to_currency = m.groups()
     ammount = float(ammount)
+    from_currency = name_to_iso4217(from_currency.strip())
+    to_currency = name_to_iso4217(to_currency.strip())

     q = (from_currency + to_currency).upper()
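With the new lookup tables the converter accepts currency names as well as ISO codes. A rough illustration, assuming currencies.json maps normalised names to lists of ISO 4217 codes and codes to localised names:

db = {'names': {'euro': ['EUR'], 'us dollar': ['USD']},
      'iso4217': {'EUR': {'en': 'Euro'}}}          # illustrative shape only

name_to_iso4217('Euro')         # -> 'EUR' (normalised, then looked up)
name_to_iso4217('doubloon')     # unknown names fall back to themselves
iso4217_to_name('EUR', 'en')    # -> 'Euro'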
@@ -23,6 +51,8 @@ def request(query, params):
     params['ammount'] = ammount
     params['from'] = from_currency
     params['to'] = to_currency
+    params['from_name'] = iso4217_to_name(from_currency, 'en')
+    params['to_name'] = iso4217_to_name(to_currency, 'en')

     return params
@@ -35,16 +65,18 @@ def response(resp):
     except:
         return results

-    answer = '{0} {1} = {2} {3} (1 {1} = {4} {3})'.format(
+    answer = '{0} {1} = {2} {3}, 1 {1} ({5}) = {4} {3} ({6})'.format(
         resp.search_params['ammount'],
         resp.search_params['from'],
         resp.search_params['ammount'] * conversion_rate,
         resp.search_params['to'],
-        conversion_rate
+        conversion_rate,
+        resp.search_params['from_name'],
+        resp.search_params['to_name'],
     )

     now_date = datetime.now().strftime('%Y%m%d')
-    url = 'http://finance.yahoo.com/currency/converter-results/{0}/{1}-{2}-to-{3}.html'  # noqa
+    url = 'https://finance.yahoo.com/currency/converter-results/{0}/{1}-{2}-to-{3}.html'  # noqa
     url = url.format(
         now_date,
         resp.search_params['ammount'],
@@ -55,3 +87,15 @@ def response(resp):
     results.append({'answer': answer, 'url': url})

     return results
+
+
+def load():
+    global db
+
+    current_dir = os.path.dirname(os.path.realpath(__file__))
+    json_data = open(current_dir + "/../data/currencies.json").read()
+
+    db = json.loads(json_data)
+
+
+load()
@@ -1,14 +1,16 @@
-## Dailymotion (Videos)
-#
-# @website     https://www.dailymotion.com
-# @provide-api yes (http://www.dailymotion.com/developer)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, thumbnail, publishedDate, embedded
-#
-# @todo        set content-parameter with correct data
+"""
+ Dailymotion (Videos)
+
+ @website     https://www.dailymotion.com
+ @provide-api yes (http://www.dailymotion.com/developer)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, thumbnail, publishedDate, embedded
+
+ @todo        set content-parameter with correct data
+"""

 from urllib import urlencode
 from json import loads
@@ -48,7 +50,7 @@ def response(resp):
     search_res = loads(resp.text)

     # return empty array if there are no results
-    if not 'list' in search_res:
+    if 'list' not in search_res:
         return []

     # parse results
@@ -60,6 +62,9 @@ def response(resp):
         publishedDate = datetime.fromtimestamp(res['created_time'], None)
         embedded = embedded_url.format(videoid=res['id'])

+        # http to https
+        thumbnail = thumbnail.replace("http://", "https://")
+
         results.append({'template': 'videos.html',
                         'url': url,
                         'title': title,
@@ -1,12 +1,14 @@
-## Deezer (Music)
-#
-# @website     https://deezer.com
-# @provide-api yes (http://developers.deezer.com/api/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content, embedded
+"""
+ Deezer (Music)
+
+ @website     https://deezer.com
+ @provide-api yes (http://developers.deezer.com/api/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content, embedded
+"""

 from json import loads
 from urllib import urlencode
@@ -16,11 +18,11 @@ categories = ['music']
 paging = True

 # search-url
-url = 'http://api.deezer.com/'
+url = 'https://api.deezer.com/'
 search_url = url + 'search?{query}&index={offset}'

 embedded_url = '<iframe scrolling="no" frameborder="0" allowTransparency="true" ' +\
-    'data-src="http://www.deezer.com/plugins/player?type=tracks&id={audioid}" ' +\
+    'data-src="https://www.deezer.com/plugins/player?type=tracks&id={audioid}" ' +\
     'width="540" height="80"></iframe>'
@@ -45,6 +47,10 @@ def response(resp):
         if result['type'] == 'track':
             title = result['title']
             url = result['link']
+
+            if url.startswith('http://'):
+                url = 'https' + url[4:]
+
             content = result['artist']['name'] +\
                 " • " +\
                 result['album']['title'] +\
@@ -1,14 +1,16 @@
-## Deviantart (Images)
-#
-# @website     https://www.deviantart.com/
-# @provide-api yes (https://www.deviantart.com/developers/) (RSS)
-#
-# @using-api   no (TODO, rewrite to api)
-# @results     HTML
-# @stable      no (HTML can change)
-# @parse       url, title, thumbnail_src, img_src
-#
-# @todo        rewrite to api
+"""
+ Deviantart (Images)
+
+ @website     https://www.deviantart.com/
+ @provide-api yes (https://www.deviantart.com/developers/) (RSS)
+
+ @using-api   no (TODO, rewrite to api)
+ @results     HTML
+ @stable      no (HTML can change)
+ @parse       url, title, thumbnail_src, img_src
+
+ @todo        rewrite to api
+"""

 from urllib import urlencode
 from urlparse import urljoin
@@ -22,7 +24,7 @@ paging = True

 # search-url
 base_url = 'https://www.deviantart.com/'
-search_url = base_url+'search?offset={offset}&{query}'
+search_url = base_url+'browse/all/?offset={offset}&{query}'


 # do search-request
@@ -56,6 +58,12 @@ def response(resp):
         thumbnail_src = link.xpath('.//img')[0].attrib.get('src')
         img_src = regex.sub('/', thumbnail_src)

+        # http to https, remove domain sharding
+        thumbnail_src = re.sub(r"https?://(th|fc)\d+.", "https://th01.", thumbnail_src)
+        thumbnail_src = re.sub(r"http://", "https://", thumbnail_src)
+
+        url = re.sub(r"http://(.*)\.deviantart\.com/", "https://\\1.deviantart.com/", url)
+
         # append result
         results.append({'url': url,
                         'title': title,
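DeviantArt serves thumbnails from numbered shard hosts (th00, fc01, and so on); rewriting them onto a single host both forces HTTPS and collapses the sharding. An illustration with an example URL:

import re

thumb = 'http://th04.deviantart.net/fs70/300W/i/2015/example.jpg'   # example URL
thumb = re.sub(r"https?://(th|fc)\d+.", "https://th01.", thumb)
# -> 'https://th01.deviantart.net/fs70/300W/i/2015/example.jpg'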
@@ -1,12 +1,14 @@
-## Digg (News, Social media)
-#
-# @website     https://digg.com/
-# @provide-api no
-#
-# @using-api   no
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content, publishedDate, thumbnail
+"""
+ Digg (News, Social media)
+
+ @website     https://digg.com/
+ @provide-api no
+
+ @using-api   no
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content, publishedDate, thumbnail
+"""

 from urllib import quote_plus
 from json import loads
@@ -58,6 +60,9 @@ def response(resp):
         pubdate = result.xpath(pubdate_xpath)[0].attrib.get('datetime')
         publishedDate = parser.parse(pubdate)

+        # http to https
+        thumbnail = thumbnail.replace("http://static.digg.com", "https://static.digg.com")
+
         # append result
         results.append({'url': url,
                         'title': title,
@@ -1,17 +1,19 @@
-## DuckDuckGo (Web)
-#
-# @website     https://duckduckgo.com/
-# @provide-api yes (https://duckduckgo.com/api),
-#              but not all results from search-site
-#
-# @using-api   no
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content
-#
-# @todo        rewrite to api
-# @todo        language support
-#              (the current used site does not support language-change)
+"""
+ DuckDuckGo (Web)
+
+ @website     https://duckduckgo.com/
+ @provide-api yes (https://duckduckgo.com/api),
+              but not all results from search-site
+
+ @using-api   no
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content
+
+ @todo        rewrite to api
+ @todo        language support
+              (the current used site does not support language-change)
+"""

 from urllib import urlencode
 from lxml.html import fromstring
@@ -1,7 +1,9 @@
-## Dummy
-#
-# @results     empty array
-# @stable      yes
+"""
+ Dummy
+
+ @results     empty array
+ @stable      yes
+"""


 # do search-request
@@ -1,12 +1,14 @@
-## Faroo (Web, News)
-#
-# @website     http://www.faroo.com
-# @provide-api yes (http://www.faroo.com/hp/api/api.html), require API-key
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content, publishedDate, img_src
+"""
+ Faroo (Web, News)
+
+ @website     http://www.faroo.com
+ @provide-api yes (http://www.faroo.com/hp/api/api.html), require API-key
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content, publishedDate, img_src
+"""

 from urllib import urlencode
 from json import loads
@@ -1,15 +1,17 @@
 #!/usr/bin/env python

-## Flickr (Images)
-#
-# @website     https://www.flickr.com
-# @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, thumbnail, img_src
-#More info on api-key : https://www.flickr.com/services/apps/create/
+"""
+ Flickr (Images)
+
+ @website     https://www.flickr.com
+ @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, thumbnail, img_src
+ More info on api-key : https://www.flickr.com/services/apps/create/
+"""

 from urllib import urlencode
 from json import loads
@@ -48,10 +50,10 @@ def response(resp):
     search_results = loads(resp.text)

     # return empty array if there are no results
-    if not 'photos' in search_results:
+    if 'photos' not in search_results:
         return []

-    if not 'photo' in search_results['photos']:
+    if 'photo' not in search_results['photos']:
         return []

     photos = search_results['photos']['photo']
@@ -1,14 +1,16 @@
 #!/usr/bin/env python

-# Flickr (Images)
-#
-# @website     https://www.flickr.com
-# @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
-#
-# @using-api   no
-# @results     HTML
-# @stable      no
-# @parse       url, title, thumbnail, img_src
+"""
+ Flickr (Images)
+
+ @website     https://www.flickr.com
+ @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
+
+ @using-api   no
+ @results     HTML
+ @stable      no
+ @parse       url, title, thumbnail, img_src
+"""

 from urllib import urlencode
 from json import loads
@@ -20,10 +22,10 @@ logger = logger.getChild('flickr-noapi')

 categories = ['images']

-url = 'https://secure.flickr.com/'
-search_url = url + 'search/?{query}&page={page}'
+url = 'https://www.flickr.com/'
+search_url = url + 'search?{query}&page={page}'
 photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
-regex = re.compile(r"\"search-photos-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL)
+regex = re.compile(r"\"search-photos-lite-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL)
 image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')

 paging = True
@@ -36,6 +38,7 @@ def build_flickr_url(user_id, photo_id):
 def request(query, params):
     params['url'] = search_url.format(query=urlencode({'text': query}),
                                       page=params['pageno'])
+
     return params
@@ -73,7 +76,7 @@ def response(resp):
             logger.debug('cannot find valid image size: {0}'.format(repr(photo)))
             continue

-        if 'id' not in photo['owner']:
+        if 'ownerNsid' not in photo:
             continue

         # For a bigger thumbnail, keep only the url_z, not the url_n
@@ -84,20 +87,14 @@ def response(resp):
         else:
             thumbnail_src = img_src

-        url = build_flickr_url(photo['owner']['id'], photo['id'])
+        url = build_flickr_url(photo['ownerNsid'], photo['id'])

         title = photo.get('title', '')

         content = '<span class="photo-author">' +\
-                  photo['owner']['username'] +\
+                  photo['username'] +\
                   '</span><br />'

-        if 'description' in photo:
-            content = content +\
-                '<span class="description">' +\
-                photo['description'] +\
-                '</span>'
-
         # append result
         results.append({'url': url,
                         'title': title,
@@ -1,14 +1,16 @@
-## General Files (Files)
-#
-# @website     http://www.general-files.org
-# @provide-api no (nothing found)
-#
-# @using-api   no (because nothing found)
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content
-#
-# @todo        detect torrents?
+"""
+ General Files (Files)
+
+ @website     http://www.general-files.org
+ @provide-api no (nothing found)
+
+ @using-api   no (because nothing found)
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content
+
+ @todo        detect torrents?
+"""

 from lxml import html
@@ -1,12 +1,14 @@
-## Gigablast (Web)
-#
-# @website     http://gigablast.com
-# @provide-api yes (http://gigablast.com/api.html)
-#
-# @using-api   yes
-# @results     XML
-# @stable      yes
-# @parse       url, title, content
+"""
+ Gigablast (Web)
+
+ @website     http://gigablast.com
+ @provide-api yes (http://gigablast.com/api.html)
+
+ @using-api   yes
+ @results     XML
+ @stable      yes
+ @parse       url, title, content
+"""

 from urllib import urlencode
 from cgi import escape
@@ -17,7 +19,7 @@ categories = ['general']
 paging = True
 number_of_results = 5

-# search-url
+# search-url, invalid HTTPS certificate
 base_url = 'http://gigablast.com/'
 search_string = 'search?{query}&n={number_of_results}&s={offset}&xml=1&qh=0'
@@ -1,12 +1,14 @@
-## Github (It)
-#
-# @website     https://github.com/
-# @provide-api yes (https://developer.github.com/v3/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes (using api)
-# @parse       url, title, content
+"""
+ Github (It)
+
+ @website     https://github.com/
+ @provide-api yes (https://developer.github.com/v3/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes (using api)
+ @parse       url, title, content
+"""

 from urllib import urlencode
 from json import loads
@@ -37,7 +39,7 @@ def response(resp):
     search_res = loads(resp.text)

     # check if items are received
-    if not 'items' in search_res:
+    if 'items' not in search_res:
         return []

     # parse results
@@ -8,39 +8,126 @@
 # @stable      no (HTML can change)
 # @parse       url, title, content, suggestion

+import re
 from urllib import urlencode
 from urlparse import urlparse, parse_qsl
 from lxml import html
 from searx.poolrequests import get
 from searx.engines.xpath import extract_text, extract_url


 # engine dependent config
 categories = ['general']
 paging = True
 language_support = True
+use_locale_domain = True
+
+# based on https://en.wikipedia.org/wiki/List_of_Google_domains and tests
+default_hostname = 'www.google.com'
+
+country_to_hostname = {
+    'BG': 'www.google.bg',  # Bulgaria
+    'CZ': 'www.google.cz',  # Czech Republic
+    'DE': 'www.google.de',  # Germany
+    'DK': 'www.google.dk',  # Denmark
+    'AT': 'www.google.at',  # Austria
+    'CH': 'www.google.ch',  # Switzerland
+    'GR': 'www.google.gr',  # Greece
+    'AU': 'www.google.com.au',  # Australia
+    'CA': 'www.google.ca',  # Canada
+    'GB': 'www.google.co.uk',  # United Kingdom
+    'ID': 'www.google.co.id',  # Indonesia
+    'IE': 'www.google.ie',  # Ireland
+    'IN': 'www.google.co.in',  # India
+    'MY': 'www.google.com.my',  # Malaysia
+    'NZ': 'www.google.co.nz',  # New Zealand
+    'PH': 'www.google.com.ph',  # Philippines
+    'SG': 'www.google.com.sg',  # Singapore
+    # 'US': 'www.google.us',  # United States, redirect to .com
+    'ZA': 'www.google.co.za',  # South Africa
+    'AR': 'www.google.com.ar',  # Argentina
+    'CL': 'www.google.cl',  # Chile
+    'ES': 'www.google.es',  # Spain
+    'MX': 'www.google.com.mx',  # Mexico
+    'EE': 'www.google.ee',  # Estonia
+    'FI': 'www.google.fi',  # Finland
+    'BE': 'www.google.be',  # Belgium
+    'FR': 'www.google.fr',  # France
+    'IL': 'www.google.co.il',  # Israel
+    'HR': 'www.google.hr',  # Croatia
+    'HU': 'www.google.hu',  # Hungary
+    'IT': 'www.google.it',  # Italy
+    'JP': 'www.google.co.jp',  # Japan
+    'KR': 'www.google.co.kr',  # South Korea
+    'LT': 'www.google.lt',  # Lithuania
+    'LV': 'www.google.lv',  # Latvia
+    'NO': 'www.google.no',  # Norway
+    'NL': 'www.google.nl',  # Netherlands
+    'PL': 'www.google.pl',  # Poland
+    'BR': 'www.google.com.br',  # Brazil
+    'PT': 'www.google.pt',  # Portugal
+    'RO': 'www.google.ro',  # Romania
+    'RU': 'www.google.ru',  # Russia
+    'SK': 'www.google.sk',  # Slovakia
+    'SL': 'www.google.si',  # Slovenia (SL -> si)
+    'SE': 'www.google.se',  # Sweden
+    'TH': 'www.google.co.th',  # Thailand
+    'TR': 'www.google.com.tr',  # Turkey
+    'UA': 'www.google.com.ua',  # Ukraine
+    # 'CN': 'www.google.cn',  # China, only from china ?
+    'HK': 'www.google.com.hk',  # Hong Kong
+    'TW': 'www.google.com.tw'  # Taiwan
+}
+
+# osm
+url_map = 'https://www.openstreetmap.org/'\
+    + '?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'

 # search-url
-google_hostname = 'www.google.com'
 search_path = '/search'
-redirect_path = '/url'
-images_path = '/images'
-search_url = ('https://' +
-              google_hostname +
+search_url = ('https://{hostname}' +
               search_path +
               '?{query}&start={offset}&gbv=1')

+# other URLs
+map_hostname_start = 'maps.google.'
+maps_path = '/maps'
+redirect_path = '/url'
+images_path = '/images'
+
 # specific xpath variables
 results_xpath = '//li[@class="g"]'
 url_xpath = './/h3/a/@href'
 title_xpath = './/h3'
 content_xpath = './/span[@class="st"]'
+content_misc_xpath = './/div[@class="f slp"]'
 suggestion_xpath = '//p[@class="_Bmc"]'

+# map : detail location
+map_address_xpath = './/div[@class="s"]//table//td[2]/span/text()'
+map_phone_xpath = './/div[@class="s"]//table//td[2]/span/span'
+map_website_url_xpath = 'h3[2]/a/@href'
+map_website_title_xpath = 'h3[2]'
+
+# map : near the location
+map_near = 'table[@class="ts"]//tr'
+map_near_title = './/h4'
+map_near_url = './/h4/a/@href'
+map_near_phone = './/span[@class="nobr"]'
+
+# images
 images_xpath = './/div/a'
 image_url_xpath = './@href'
 image_img_src_xpath = './img/@src'

+# property names
+# FIXME : no translation
+property_address = "Address"
+property_phone = "Phone number"
+
+# cookies
 pref_cookie = ''
+nid_cookie = {}
+
 # see https://support.google.com/websearch/answer/873?hl=en
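With use_locale_domain set, the country half of the searx locale selects the Google domain, and anything unlisted falls back to www.google.com:

country = 'DE'   # e.g. from the locale de_DE
hostname = country_to_hostname.get(country.upper(), default_hostname)
# -> 'www.google.de'; an unlisted country such as 'IS' yields 'www.google.com'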
@@ -52,8 +139,21 @@ def get_google_pref_cookie():
     return pref_cookie


+def get_google_nid_cookie(google_hostname):
+    global nid_cookie
+    if google_hostname not in nid_cookie:
+        resp = get('https://' + google_hostname)
+        nid_cookie[google_hostname] = resp.cookies.get("NID", None)
+    return nid_cookie[google_hostname]
+
+
 # remove google-specific tracking-url
-def parse_url(url_string):
+def parse_url(url_string, google_hostname):
+    # sanity check
+    if url_string is None:
+        return url_string
+
+    # normal case
     parsed_url = urlparse(url_string)
     if (parsed_url.netloc in [google_hostname, '']
             and parsed_url.path == redirect_path):
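get_google_nid_cookie memoises one NID cookie per Google domain, so only the first query against each domain pays the extra bootstrap request. The shape of the cache, reduced to its essentials (fetch_cookie is a hypothetical stand-in for that initial request):

nid_cookie = {}

def get_nid(hostname, fetch_cookie):
    # fetch_cookie stands in for the initial GET against the domain
    if hostname not in nid_cookie:
        nid_cookie[hostname] = fetch_cookie(hostname)
    return nid_cookie[hostname]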
@@ -63,20 +163,45 @@ def parse_url(url_string):
     return url_string


+# returns extract_text on the first result selected by the xpath or None
+def extract_text_from_dom(result, xpath):
+    r = result.xpath(xpath)
+    if len(r) > 0:
+        return extract_text(r[0])
+    return None
+
+
 # do search-request
 def request(query, params):
     offset = (params['pageno'] - 1) * 10

     if params['language'] == 'all':
         language = 'en'
+        country = 'US'
     else:
-        language = params['language'].replace('_', '-').lower()
+        language_array = params['language'].lower().split('_')
+        if len(language_array) == 2:
+            country = language_array[1]
+        else:
+            country = 'US'
+        language = language_array[0] + ',' + language_array[0] + '-' + country
+
+    if use_locale_domain:
+        google_hostname = country_to_hostname.get(country.upper(), default_hostname)
+    else:
+        google_hostname = default_hostname

     params['url'] = search_url.format(offset=offset,
-                                      query=urlencode({'q': query}))
+                                      query=urlencode({'q': query}),
+                                      hostname=google_hostname)

     params['headers']['Accept-Language'] = language
-    params['cookies']['PREF'] = get_google_pref_cookie()
+    params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
+    if google_hostname == default_hostname:
+        params['cookies']['PREF'] = get_google_pref_cookie()
+    params['cookies']['NID'] = get_google_nid_cookie(google_hostname)
+
+    params['google_hostname'] = google_hostname

     return params
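The locale string is split into language and country halves, and the Accept-Language header then advertises both the bare and the regionalised form:

language_array = 'de_AT'.lower().split('_')   # ['de', 'at']
country = language_array[1]                   # 'at'
language = language_array[0] + ',' + language_array[0] + '-' + country
# -> 'de,de-at'; a bare 'de' has no country half and falls back to 'US'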
@@ -85,33 +210,63 @@ def request(query, params):
 def response(resp):
     results = []

+    # detect google sorry
+    resp_url = urlparse(resp.url)
+    if resp_url.netloc == 'sorry.google.com' or resp_url.path == '/sorry/IndexRedirect':
+        raise RuntimeWarning('sorry.google.com')
+
+    # which hostname ?
+    google_hostname = resp.search_params.get('google_hostname')
+    google_url = "https://" + google_hostname
+
+    # convert the text to dom
     dom = html.fromstring(resp.text)

     # parse results
     for result in dom.xpath(results_xpath):
         title = extract_text(result.xpath(title_xpath)[0])
         try:
-            url = parse_url(extract_url(result.xpath(url_xpath), search_url))
-            parsed_url = urlparse(url)
-            if (parsed_url.netloc == google_hostname
-                    and parsed_url.path == search_path):
-                # remove the link to google news
-                continue
+            url = parse_url(extract_url(result.xpath(url_xpath), google_url), google_hostname)
+            parsed_url = urlparse(url, google_hostname)
+
+            # map result
+            if ((parsed_url.netloc == google_hostname and parsed_url.path.startswith(maps_path))
+                    or (parsed_url.netloc.startswith(map_hostname_start))):
+                x = result.xpath(map_near)
+                if len(x) > 0:
+                    # map : near the location
+                    results = results + parse_map_near(parsed_url, x, google_hostname)
+                else:
+                    # map : detail about a location
+                    results = results + parse_map_detail(parsed_url, result, google_hostname)
+
+            # google news
+            elif (parsed_url.netloc == google_hostname
+                    and parsed_url.path == search_path):
+                # skipping news results
+                pass

             # images result
-            if (parsed_url.netloc == google_hostname
+            elif (parsed_url.netloc == google_hostname
                     and parsed_url.path == images_path):
                 # only thumbnail image provided,
                 # so skipping image results
-                # results = results + parse_images(result)
+                # results = results + parse_images(result, google_hostname)
                 pass

             else:
                 # normal result
-                content = extract_text(result.xpath(content_xpath)[0])
+                content = extract_text_from_dom(result, content_xpath)
+                if content is None:
+                    continue
+                content_misc = extract_text_from_dom(result, content_misc_xpath)
+                if content_misc is not None:
+                    content = content_misc + "<br />" + content
                 # append result
                 results.append({'url': url,
                                 'title': title,
-                                'content': content})
+                                'content': content
+                                })
         except:
             continue
@@ -124,10 +279,10 @@ def response(resp):
     return results


-def parse_images(result):
+def parse_images(result, google_hostname):
     results = []
     for image in result.xpath(images_xpath):
-        url = parse_url(extract_text(image.xpath(image_url_xpath)[0]))
+        url = parse_url(extract_text(image.xpath(image_url_xpath)[0]), google_hostname)
         img_src = extract_text(image.xpath(image_img_src_xpath)[0])

         # append result
@@ -135,6 +290,77 @@ def parse_images(result):
                         'title': '',
                         'content': '',
                         'img_src': img_src,
-                        'template': 'images.html'})
+                        'template': 'images.html'
+                        })

     return results
+
+
+def parse_map_near(parsed_url, x, google_hostname):
+    results = []
+
+    for result in x:
+        title = extract_text_from_dom(result, map_near_title)
+        url = parse_url(extract_text_from_dom(result, map_near_url), google_hostname)
+        attributes = []
+        phone = extract_text_from_dom(result, map_near_phone)
+        add_attributes(attributes, property_phone, phone, 'tel:' + phone)
+        results.append({'title': title,
+                        'url': url,
+                        'content': attributes_to_html(attributes)
+                        })
+
+    return results
+
+
+def parse_map_detail(parsed_url, result, google_hostname):
+    results = []
+
+    # try to parse the geoloc
+    m = re.search('@([0-9\.]+),([0-9\.]+),([0-9]+)', parsed_url.path)
+    if m is None:
+        m = re.search('ll\=([0-9\.]+),([0-9\.]+)\&z\=([0-9]+)', parsed_url.query)
+
+    if m is not None:
+        # geoloc found (ignored)
+        lon = float(m.group(2))  # noqa
+        lat = float(m.group(1))  # noqa
+        zoom = int(m.group(3))  # noqa
+
+        # attributes
+        attributes = []
+        address = extract_text_from_dom(result, map_address_xpath)
+        phone = extract_text_from_dom(result, map_phone_xpath)
+        add_attributes(attributes, property_address, address, 'geo:' + str(lat) + ',' + str(lon))
+        add_attributes(attributes, property_phone, phone, 'tel:' + phone)
+
+        # title / content / url
+        website_title = extract_text_from_dom(result, map_website_title_xpath)
+        content = extract_text_from_dom(result, content_xpath)
+        website_url = parse_url(extract_text_from_dom(result, map_website_url_xpath), google_hostname)
+
+        # add a result if there is a website
+        if website_url is not None:
+            results.append({'title': website_title,
+                            'content': (content + '<br />' if content is not None else '')
+                            + attributes_to_html(attributes),
+                            'url': website_url
+                            })
+
+    return results
+
+
+def add_attributes(attributes, name, value, url):
+    if value is not None and len(value) > 0:
+        attributes.append({'label': name, 'value': value, 'url': url})
+
+
+def attributes_to_html(attributes):
+    retval = '<table class="table table-striped">'
+    for a in attributes:
+        value = a.get('value')
+        if 'url' in a:
+            value = '<a href="' + a.get('url') + '">' + value + '</a>'
+        retval = retval + '<tr><th>' + a.get('label') + '</th><td>' + value + '</td></tr>'
+    retval = retval + '</table>'
+    return retval
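attributes_to_html renders the collected label/value pairs as a small Bootstrap-styled table, linking a value whenever a URL accompanies it. For example, with an illustrative phone number:

attributes = []
add_attributes(attributes, property_phone, '+1-555-0100', 'tel:+1-555-0100')
print(attributes_to_html(attributes))
# <table class="table table-striped"><tr><th>Phone number</th>
# <td><a href="tel:+1-555-0100">+1-555-0100</a></td></tr></table>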
@@ -1,13 +1,15 @@
-## Google (Images)
-#
-# @website     https://www.google.com
-# @provide-api yes (https://developers.google.com/web-search/docs/),
-#              deprecated!
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes (but deprecated)
-# @parse       url, title, img_src
+"""
+ Google (Images)
+
+ @website     https://www.google.com
+ @provide-api yes (https://developers.google.com/web-search/docs/),
+              deprecated!
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes (but deprecated)
+ @parse       url, title, img_src
+"""

 from urllib import urlencode, unquote
 from json import loads
@@ -56,6 +58,9 @@ def response(resp):
             continue
         thumbnail_src = result['tbUrl']

+        # http to https
+        thumbnail_src = thumbnail_src.replace("http://", "https://")
+
         # append result
         results.append({'url': href,
                         'title': title,
@@ -1,13 +1,15 @@
-## Google (News)
-#
-# @website     https://www.google.com
-# @provide-api yes (https://developers.google.com/web-search/docs/),
-#              deprecated!
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes (but deprecated)
-# @parse       url, title, content, publishedDate
+"""
+ Google (News)
+
+ @website     https://www.google.com
+ @provide-api yes (https://developers.google.com/web-search/docs/),
+              deprecated!
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes (but deprecated)
+ @parse       url, title, content, publishedDate
+"""

 from urllib import urlencode
 from json import loads
@@ -1,12 +1,14 @@
-## Kickass Torrent (Videos, Music, Files)
-#
-# @website     https://kickass.so
-# @provide-api no (nothing found)
-#
-# @using-api   no
-# @results     HTML (using search portal)
-# @stable      yes (HTML can change)
-# @parse       url, title, content, seed, leech, magnetlink
+"""
+ Kickass Torrent (Videos, Music, Files)
+
+ @website     https://kickass.so
+ @provide-api no (nothing found)
+
+ @using-api   no
+ @results     HTML (using search portal)
+ @stable      yes (HTML can change)
+ @parse       url, title, content, seed, leech, magnetlink
+"""

 from urlparse import urljoin
 from cgi import escape
@@ -34,10 +36,6 @@ def request(query, params):
     params['url'] = search_url.format(search_term=quote(query),
                                       pageno=params['pageno'])

-    # FIX: SSLError: hostname 'kickass.so'
-    # doesn't match either of '*.kickass.to', 'kickass.to'
-    params['verify'] = False
-
     return params
@@ -1,14 +1,16 @@
-## general mediawiki-engine (Web)
-#
-# @website     websites built on mediawiki (https://www.mediawiki.org)
-# @provide-api yes (http://www.mediawiki.org/wiki/API:Search)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title
-#
-# @todo        content
+"""
+ general mediawiki-engine (Web)
+
+ @website     websites built on mediawiki (https://www.mediawiki.org)
+ @provide-api yes (http://www.mediawiki.org/wiki/API:Search)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title
+
+ @todo        content
+"""

 from json import loads
 from string import Formatter
@@ -1,12 +1,14 @@
-## Mixcloud (Music)
-#
-# @website     https://www.mixcloud.com/
-# @provide-api yes (http://www.mixcloud.com/developers/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content, embedded, publishedDate
+"""
+ Mixcloud (Music)
+
+ @website     https://www.mixcloud.com/
+ @provide-api yes (http://www.mixcloud.com/developers/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content, embedded, publishedDate
+"""

 from json import loads
 from urllib import urlencode
@ -17,7 +19,7 @@ categories = ['music']
|
||||||
paging = True
|
paging = True
|
||||||
|
|
||||||
# search-url
|
# search-url
|
||||||
url = 'http://api.mixcloud.com/'
|
url = 'https://api.mixcloud.com/'
|
||||||
search_url = url + 'search/?{query}&type=cloudcast&limit=10&offset={offset}'
|
search_url = url + 'search/?{query}&type=cloudcast&limit=10&offset={offset}'
|
||||||
|
|
||||||
embedded_url = '<iframe scrolling="no" frameborder="0" allowTransparency="true" ' +\
|
embedded_url = '<iframe scrolling="no" frameborder="0" allowTransparency="true" ' +\
|
||||||
|
|
|
@@ -1,12 +1,14 @@
-## OpenStreetMap (Map)
-#
-# @website     https://openstreetmap.org/
-# @provide-api yes (http://wiki.openstreetmap.org/wiki/Nominatim)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title
+"""
+ OpenStreetMap (Map)
+
+ @website     https://openstreetmap.org/
+ @provide-api yes (http://wiki.openstreetmap.org/wiki/Nominatim)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title
+"""

 from json import loads
 from searx.utils import searx_useragent
@@ -1,12 +1,14 @@
-## Photon (Map)
-#
-# @website     https://photon.komoot.de
-# @provide-api yes (https://photon.komoot.de/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title
+"""
+ Photon (Map)
+
+ @website     https://photon.komoot.de
+ @provide-api yes (https://photon.komoot.de/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title
+"""

 from urllib import urlencode
 from json import loads

@@ -41,9 +43,6 @@ def request(query, params):
     # using searx User-Agent
     params['headers']['User-Agent'] = searx_useragent()

-    # FIX: SSLError: SSL3_GET_SERVER_CERTIFICATE:certificate verify failed
-    params['verify'] = False
-
     return params
@@ -1,4 +1,4 @@
-## Piratebay (Videos, Music, Files)
+# Piratebay (Videos, Music, Files)
 #
 # @website     https://thepiratebay.se
 # @provide-api no (nothing found)

@@ -78,7 +78,11 @@ def response(resp):
             leech = 0

         magnetlink = result.xpath(magnet_xpath)[0]
-        torrentfile = result.xpath(torrent_xpath)[0]
+        torrentfile_links = result.xpath(torrent_xpath)
+        if torrentfile_links:
+            torrentfile_link = torrentfile_links[0].attrib.get('href')
+        else:
+            torrentfile_link = None

         # append result
         results.append({'url': href,

@@ -87,7 +91,7 @@ def response(resp):
                         'seed': seed,
                         'leech': leech,
                         'magnetlink': magnetlink.attrib.get('href'),
-                        'torrentfile': torrentfile.attrib.get('href'),
+                        'torrentfile': torrentfile_link,
                         'template': 'torrent.html'})

     # return results sorted by seeder
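Note on the piratebay hunk above: indexing an empty XPath result list with [0] raises IndexError for rows that have no torrent link, so the commit guards the lookup. A minimal standalone sketch of that pattern (not part of the commit; the HTML snippet is fabricated):

from lxml import html

doc = html.fromstring('<tr><td><a href="magnet:?xt=urn:btih:abc">magnet</a></td></tr>')
links = doc.xpath('.//a[starts-with(@href, "magnet:")]')

# guarded lookup: None instead of IndexError when nothing matched
magnetlink = links[0].attrib.get('href') if links else None
print(magnetlink)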
@@ -0,0 +1,98 @@
+"""
+ Qwant (Web, Images, News, Social)
+
+ @website     https://qwant.com/
+ @provide-api not officially (https://api.qwant.com/api/search/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content
+"""
+
+from urllib import urlencode
+from json import loads
+from datetime import datetime
+
+# engine dependent config
+categories = None
+paging = True
+language_support = True
+
+category_to_keyword = {'general': 'web',
+                       'images': 'images',
+                       'news': 'news',
+                       'social media': 'social'}
+
+# search-url
+url = 'https://api.qwant.com/api/search/{keyword}?count=10&offset={offset}&f=&{query}'
+
+
+# do search-request
+def request(query, params):
+    offset = (params['pageno'] - 1) * 10
+
+    if categories[0] and categories[0] in category_to_keyword:
+
+        params['url'] = url.format(keyword=category_to_keyword[categories[0]],
+                                   query=urlencode({'q': query}),
+                                   offset=offset)
+    else:
+        params['url'] = url.format(keyword='web',
+                                   query=urlencode({'q': query}),
+                                   offset=offset)
+
+    # add language tag if specified
+    if params['language'] != 'all':
+        params['url'] += '&locale=' + params['language'].lower()
+
+    return params
+
+
+# get response from search-request
+def response(resp):
+    results = []
+
+    search_results = loads(resp.text)
+
+    # return empty array if there are no results
+    if 'data' not in search_results:
+        return []
+
+    data = search_results.get('data', {})
+
+    res = data.get('result', {})
+
+    # parse results
+    for result in res.get('items', {}):
+
+        title = result['title']
+        res_url = result['url']
+        content = result['desc']
+
+        if category_to_keyword.get(categories[0], '') == 'web':
+            results.append({'title': title,
+                            'content': content,
+                            'url': res_url})
+
+        elif category_to_keyword.get(categories[0], '') == 'images':
+            thumbnail_src = result['thumbnail']
+            img_src = result['media']
+            results.append({'template': 'images.html',
+                            'url': res_url,
+                            'title': title,
+                            'content': '',
+                            'thumbnail_src': thumbnail_src,
+                            'img_src': img_src})
+
+        elif (category_to_keyword.get(categories[0], '') == 'news' or
+              category_to_keyword.get(categories[0], '') == 'social'):
+            published_date = datetime.fromtimestamp(result['date'], None)
+
+            results.append({'url': res_url,
+                            'title': title,
+                            'publishedDate': published_date,
+                            'content': content})
+
+    # return results
+    return results
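For reference, the qwant request() above assembles the API URL from a category keyword, a 10-results-per-page offset, and an optional locale. A standalone sketch with hypothetical inputs (page 2, the 'general' category, language fr_FR):

from urllib import urlencode  # Python 2, as in the engine

url = 'https://api.qwant.com/api/search/{keyword}?count=10&offset={offset}&f=&{query}'

pageno = 2
request_url = url.format(keyword='web',
                         query=urlencode({'q': 'free software'}),
                         offset=(pageno - 1) * 10)
request_url += '&locale=' + 'fr_FR'.lower()
print(request_url)
# https://api.qwant.com/api/search/web?count=10&offset=10&f=&q=free+software&locale=fr_fr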
@@ -1,12 +1,14 @@
-## Searchcode (It)
-#
-# @website     https://searchcode.com/
-# @provide-api yes (https://searchcode.com/api/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content
+"""
+ Searchcode (It)
+
+ @website     https://searchcode.com/
+ @provide-api yes (https://searchcode.com/api/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content
+"""

 from urllib import urlencode
 from json import loads

@@ -32,6 +34,11 @@ def request(query, params):
     params['url'] = search_url.format(query=urlencode({'q': query}),
                                       pageno=params['pageno']-1)

+    # Disable SSL verification
+    # error: (60) SSL certificate problem: unable to get local issuer
+    # certificate
+    params['verify'] = False
+
     return params
@@ -1,12 +1,14 @@
-## Searchcode (It)
-#
-# @website     https://searchcode.com/
-# @provide-api yes (https://searchcode.com/api/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content
+"""
+ Searchcode (It)
+
+ @website     https://searchcode.com/
+ @provide-api yes (https://searchcode.com/api/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content
+"""

 from urllib import urlencode
 from json import loads

@@ -25,6 +27,11 @@ def request(query, params):
     params['url'] = search_url.format(query=urlencode({'q': query}),
                                       pageno=params['pageno']-1)

+    # Disable SSL verification
+    # error: (60) SSL certificate problem: unable to get local issuer
+    # certificate
+    params['verify'] = False
+
     return params
@@ -1,12 +1,14 @@
-## Soundcloud (Music)
-#
-# @website     https://soundcloud.com
-# @provide-api yes (https://developers.soundcloud.com/)
-#
-# @using-api   yes
-# @results     JSON
-# @stable      yes
-# @parse       url, title, content, publishedDate, embedded
+"""
+ Soundcloud (Music)
+
+ @website     https://soundcloud.com
+ @provide-api yes (https://developers.soundcloud.com/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content, publishedDate, embedded
+"""

 from json import loads
 from urllib import urlencode, quote_plus
@@ -0,0 +1,62 @@
+"""
+ Spotify (Music)
+
+ @website     https://spotify.com
+ @provide-api yes (https://developer.spotify.com/web-api/search-item/)
+
+ @using-api   yes
+ @results     JSON
+ @stable      yes
+ @parse       url, title, content, embedded
+"""
+
+from json import loads
+from urllib import urlencode
+
+# engine dependent config
+categories = ['music']
+paging = True
+
+# search-url
+url = 'https://api.spotify.com/'
+search_url = url + 'v1/search?{query}&type=track&offset={offset}'
+
+embedded_url = '<iframe data-src="https://embed.spotify.com/?uri=spotify:track:{audioid}"\
+    width="300" height="80" frameborder="0" allowtransparency="true"></iframe>'
+
+
+# do search-request
+def request(query, params):
+    offset = (params['pageno'] - 1) * 20
+
+    params['url'] = search_url.format(query=urlencode({'q': query}),
+                                      offset=offset)
+
+    return params
+
+
+# get response from search-request
+def response(resp):
+    results = []
+
+    search_res = loads(resp.text)
+
+    # parse results
+    for result in search_res.get('tracks', {}).get('items', {}):
+        if result['type'] == 'track':
+            title = result['name']
+            url = result['external_urls']['spotify']
+            content = result['artists'][0]['name'] +\
+                " • " +\
+                result['album']['name'] +\
+                " • " + result['name']
+            embedded = embedded_url.format(audioid=result['id'])
+
+            # append result
+            results.append({'url': url,
+                            'title': title,
+                            'embedded': embedded,
+                            'content': content})
+
+    # return results
+    return results
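The spotify parser above builds the content line by joining artist, album, and track name with a bullet separator. A minimal sketch with a fabricated item shaped like a v1 search API response:

result = {'type': 'track',
          'id': '3n3Ppam7vgaVa1iaRUc9Lp',  # fabricated example id
          'name': 'Track',
          'external_urls': {'spotify': 'https://open.spotify.com/track/3n3Ppam7vgaVa1iaRUc9Lp'},
          'artists': [{'name': 'Artist'}],
          'album': {'name': 'Album'}}

bullet = u' \u2022 '  # the " • " separator used by the engine
content = result['artists'][0]['name'] + bullet + result['album']['name'] + bullet + result['name']
print(content)  # Artist • Album • Track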
@@ -1,12 +1,14 @@
-## Stackoverflow (It)
-#
-# @website     https://stackoverflow.com/
-# @provide-api not clear (https://api.stackexchange.com/docs/advanced-search)
-#
-# @using-api   no
-# @results     HTML
-# @stable      no (HTML can change)
-# @parse       url, title, content
+"""
+ Stackoverflow (It)
+
+ @website     https://stackoverflow.com/
+ @provide-api not clear (https://api.stackexchange.com/docs/advanced-search)
+
+ @using-api   no
+ @results     HTML
+ @stable      no (HTML can change)
+ @parse       url, title, content
+"""

 from urlparse import urljoin
 from cgi import escape

@@ -19,7 +21,7 @@ categories = ['it']
 paging = True

 # search-url
-url = 'http://stackoverflow.com/'
+url = 'https://stackoverflow.com/'
 search_url = url+'search?{query}&page={pageno}'

 # specific xpath variables

@@ -66,7 +66,15 @@ def response(resp):
         url = link.attrib.get('href')

         # block google-ad url's
-        if re.match("^http(s|)://www.google.[a-z]+/aclk.*$", url):
+        if re.match("^http(s|)://(www\.)?google\.[a-z]+/aclk.*$", url):
+            continue
+
+        # block startpage search url's
+        if re.match("^http(s|)://(www\.)?startpage\.com/do/search\?.*$", url):
+            continue
+
+        # block ixquick search url's
+        if re.match("^http(s|)://(www\.)?ixquick\.com/do/search\?.*$", url):
             continue

         title = escape(extract_text(link))
@@ -1,12 +1,14 @@
-## Subtitleseeker (Video)
-#
-# @website     http://www.subtitleseeker.com
-# @provide-api no
-#
-# @using-api   no
-# @results     HTML
-# @stable      no (HTML can change)
-# @parse       url, title, content
+"""
+ Subtitleseeker (Video)
+
+ @website     http://www.subtitleseeker.com
+ @provide-api no
+
+ @using-api   no
+ @results     HTML
+ @stable      no (HTML can change)
+ @parse       url, title, content
+"""

 from cgi import escape
 from urllib import quote_plus
@@ -0,0 +1,108 @@
+"""
+ Swisscows (Web, Images)
+
+ @website     https://swisscows.ch
+ @provide-api no
+
+ @using-api   no
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content
+"""
+
+from json import loads
+from urllib import urlencode, unquote
+import re
+
+# engine dependent config
+categories = ['general', 'images']
+paging = True
+language_support = True
+
+# search-url
+base_url = 'https://swisscows.ch/'
+search_string = '?{query}&page={page}'
+
+# regex
+regex_json = re.compile('initialData: {"Request":(.|\n)*},\s*environment')
+regex_json_remove_start = re.compile('^initialData:\s*')
+regex_json_remove_end = re.compile(',\s*environment$')
+regex_img_url_remove_start = re.compile('^https?://i\.swisscows\.ch/\?link=')
+
+
+# do search-request
+def request(query, params):
+    if params['language'] == 'all':
+        ui_language = 'browser'
+        region = 'browser'
+    else:
+        region = params['language'].replace('_', '-')
+        ui_language = params['language'].split('_')[0]
+
+    search_path = search_string.format(
+        query=urlencode({'query': query,
+                         'uiLanguage': ui_language,
+                         'region': region}),
+        page=params['pageno'])
+
+    # image search query is something like 'image?{query}&page={page}'
+    if params['category'] == 'images':
+        search_path = 'image' + search_path
+
+    params['url'] = base_url + search_path
+
+    return params
+
+
+# get response from search-request
+def response(resp):
+    results = []
+
+    json_regex = regex_json.search(resp.content)
+
+    # check if results are returned
+    if not json_regex:
+        return []
+
+    json_raw = regex_json_remove_end.sub('', regex_json_remove_start.sub('', json_regex.group()))
+    json = loads(json_raw)
+
+    # parse results
+    for result in json['Results'].get('items', []):
+        result_title = result['Title'].replace(u'\uE000', '').replace(u'\uE001', '')
+
+        # parse image results
+        if result.get('ContentType', '').startswith('image'):
+            img_url = unquote(regex_img_url_remove_start.sub('', result['Url']))
+
+            # append result
+            results.append({'url': result['SourceUrl'],
+                            'title': result['Title'],
+                            'content': '',
+                            'img_src': img_url,
+                            'template': 'images.html'})
+
+        # parse general results
+        else:
+            result_url = result['Url'].replace(u'\uE000', '').replace(u'\uE001', '')
+            result_content = result['Description'].replace(u'\uE000', '').replace(u'\uE001', '')
+
+            # append result
+            results.append({'url': result_url,
+                            'title': result_title,
+                            'content': result_content})
+
+    # parse images
+    for result in json.get('Images', []):
+        # decode image url
+        img_url = unquote(regex_img_url_remove_start.sub('', result['Url']))
+
+        # append result
+        results.append({'url': result['SourceUrl'],
+                        'title': result['Title'],
+                        'content': '',
+                        'img_src': img_url,
+                        'template': 'images.html'})
+
+    # return results
+    return results
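The swisscows engine above has no API; it pulls the inline `initialData:` JSON blob out of the HTML with the three regexes defined at the top of the file. A standalone sketch of that extraction on a fabricated page fragment:

import re
from json import loads

# fabricated fragment imitating the inline "initialData:" blob the regexes target
page = 'initialData: {"Request": {"q": "test"}, "Results": {"items": []}},  environment: "prod"'

regex_json = re.compile('initialData: {"Request":(.|\n)*},\s*environment')
regex_json_remove_start = re.compile('^initialData:\s*')
regex_json_remove_end = re.compile(',\s*environment$')

match = regex_json.search(page)
if match:
    json_raw = regex_json_remove_end.sub('', regex_json_remove_start.sub('', match.group()))
    print(loads(json_raw))  # {u'Request': {u'q': u'test'}, u'Results': {u'items': []}}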
@@ -1,14 +1,16 @@
-## Twitter (Social media)
-#
-# @website     https://twitter.com/
-# @provide-api yes (https://dev.twitter.com/docs/using-search)
-#
-# @using-api   no
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content
-#
-# @todo        publishedDate
+"""
+ Twitter (Social media)
+
+ @website     https://twitter.com/
+ @provide-api yes (https://dev.twitter.com/docs/using-search)
+
+ @using-api   no
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content
+
+ @todo        publishedDate
+"""

 from urlparse import urljoin
 from urllib import urlencode

@@ -27,8 +29,8 @@ search_url = base_url + 'search?'
 # specific xpath variables
 results_xpath = '//li[@data-item-type="tweet"]'
 link_xpath = './/small[@class="time"]//a'
-title_xpath = './/span[@class="username js-action-profile-name"]'
-content_xpath = './/p[@class="js-tweet-text tweet-text"]'
+title_xpath = './/span[contains(@class, "username")]'
+content_xpath = './/p[contains(@class, "tweet-text")]'
 timestamp_xpath = './/span[contains(@class,"_timestamp")]'

@@ -53,10 +55,14 @@ def response(resp):
     # parse results
     for tweet in dom.xpath(results_xpath):
-        link = tweet.xpath(link_xpath)[0]
+        try:
+            link = tweet.xpath(link_xpath)[0]
+            content = extract_text(tweet.xpath(content_xpath)[0])
+        except Exception:
+            continue
+
         url = urljoin(base_url, link.attrib.get('href'))
         title = extract_text(tweet.xpath(title_xpath))
-        content = extract_text(tweet.xpath(content_xpath)[0])

         pubdate = tweet.xpath(timestamp_xpath)
         if len(pubdate) > 0:
@@ -23,15 +23,15 @@ categories = ['videos']
 paging = True

 # search-url
-base_url = 'http://vimeo.com'
+base_url = 'https://vimeo.com'
 search_url = base_url + '/search/page:{pageno}?{query}'

 # specific xpath variables
-results_xpath = '//div[@id="browse_content"]/ol/li'
-url_xpath = './a/@href'
-title_xpath = './a/div[@class="data"]/p[@class="title"]'
-content_xpath = './a/img/@src'
-publishedDate_xpath = './/p[@class="meta"]//attribute::datetime'
+results_xpath = '//div[contains(@class,"results_grid")]/ul/li'
+url_xpath = './/a/@href'
+title_xpath = './/span[@class="title"]'
+thumbnail_xpath = './/img[@class="js-clip_thumbnail_image"]/@src'
+publishedDate_xpath = './/time/attribute::datetime'

 embedded_url = '<iframe data-src="//player.vimeo.com/video{videoid}" ' +\
                'width="540" height="304" frameborder="0" ' +\

@@ -58,7 +58,7 @@ def response(resp):
         videoid = result.xpath(url_xpath)[0]
         url = base_url + videoid
         title = p.unescape(extract_text(result.xpath(title_xpath)))
-        thumbnail = extract_text(result.xpath(content_xpath)[0])
+        thumbnail = extract_text(result.xpath(thumbnail_xpath)[0])
         publishedDate = parser.parse(extract_text(result.xpath(publishedDate_xpath)[0]))
         embedded = embedded_url.format(videoid=videoid)
@@ -1,8 +1,15 @@
 import json
-from urllib import urlencode
+
+from searx import logger
 from searx.poolrequests import get
 from searx.utils import format_date_by_locale

+from datetime import datetime
+from dateutil.parser import parse as dateutil_parse
+from urllib import urlencode
+
+
+logger = logger.getChild('wikidata')
 result_count = 1
 wikidata_host = 'https://www.wikidata.org'
 wikidata_api = wikidata_host + '/w/api.php'

@@ -164,14 +171,12 @@ def getDetail(jsonresponse, wikidata_id, language, locale):
     if postal_code is not None:
         attributes.append({'label': 'Postal code(s)', 'value': postal_code})

-    date_of_birth = get_time(claims, 'P569', None)
+    date_of_birth = get_time(claims, 'P569', locale, None)
     if date_of_birth is not None:
-        date_of_birth = format_date_by_locale(date_of_birth[8:], locale)
         attributes.append({'label': 'Date of birth', 'value': date_of_birth})

-    date_of_death = get_time(claims, 'P570', None)
+    date_of_death = get_time(claims, 'P570', locale, None)
     if date_of_death is not None:
-        date_of_death = format_date_by_locale(date_of_death[8:], locale)
         attributes.append({'label': 'Date of death', 'value': date_of_death})

     if len(attributes) == 0 and len(urls) == 2 and len(description) == 0:

@@ -229,7 +234,7 @@ def get_string(claims, propertyName, defaultValue=None):
     return result[0]


-def get_time(claims, propertyName, defaultValue=None):
+def get_time(claims, propertyName, locale, defaultValue=None):
     propValue = claims.get(propertyName, {})
     if len(propValue) == 0:
         return defaultValue

@@ -244,9 +249,22 @@ def get_time(claims, propertyName, defaultValue=None):
         result.append(value.get('time', ''))

     if len(result) == 0:
-        return defaultValue
+        date_string = defaultValue
     else:
-        return ', '.join(result)
+        date_string = ', '.join(result)
+
+    try:
+        parsed_date = datetime.strptime(date_string, "+%Y-%m-%dT%H:%M:%SZ")
+    except:
+        if date_string.startswith('-'):
+            return date_string.split('T')[0]
+        try:
+            parsed_date = dateutil_parse(date_string, fuzzy=False, default=False)
+        except:
+            logger.debug('could not parse date %s', date_string)
+            return date_string.split('T')[0]
+
+    return format_date_by_locale(parsed_date, locale)


 def get_geolink(claims, propertyName, defaultValue=''):
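The reworked get_time above moves locale formatting inside the helper: it first tries Wikidata's exact timestamp format, falls back to a tolerant parse, and returns just the date part as text for anything unparseable, notably negative (BC) years, which datetime cannot represent. A condensed standalone restatement of that fallback chain (a sketch, not the commit's exact code):

from datetime import datetime
from dateutil.parser import parse as dateutil_parse

def parse_wikidata_time(date_string):
    # 1. the exact Wikidata timestamp format, e.g. '+1952-03-11T00:00:00Z'
    try:
        return datetime.strptime(date_string, "+%Y-%m-%dT%H:%M:%SZ")
    except ValueError:
        # negative (BC) years cannot be represented as datetime objects
        if date_string.startswith('-'):
            return date_string.split('T')[0]
        # 2. a more tolerant parse; 3. give up and keep the date part as text
        try:
            return dateutil_parse(date_string)
        except (ValueError, OverflowError):
            return date_string.split('T')[0]

print(parse_wikidata_time('+1952-03-11T00:00:00Z'))  # 1952-03-11 00:00:00
print(parse_wikidata_time('-0500-01-01T00:00:00Z'))  # -0500-01-01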
@@ -1,13 +1,14 @@
-## 1x (Images)
-#
-# @website     http://1x.com/
-# @provide-api no
-#
-# @using-api   no
-# @results     HTML
-# @stable      no (HTML can change)
-# @parse       url, title, thumbnail, img_src, content
-
+"""
+ 1x (Images)
+
+ @website     http://1x.com/
+ @provide-api no
+
+ @using-api   no
+ @results     HTML
+ @stable      no (HTML can change)
+ @parse       url, title, thumbnail, img_src, content
+"""

 from urllib import urlencode
 from urlparse import urljoin

@@ -20,7 +21,7 @@ categories = ['images']
 paging = False

 # search-url
-base_url = 'http://1x.com'
+base_url = 'https://1x.com'
 search_url = base_url+'/backend/search.php?{query}'
@@ -1,14 +1,16 @@
-## 500px (Images)
-#
-# @website     https://500px.com
-# @provide-api yes (https://developers.500px.com/)
-#
-# @using-api   no
-# @results     HTML
-# @stable      no (HTML can change)
-# @parse       url, title, thumbnail, img_src, content
-#
-# @todo        rewrite to api
+"""
+ 500px (Images)
+
+ @website     https://500px.com
+ @provide-api yes (https://developers.500px.com/)
+
+ @using-api   no
+ @results     HTML
+ @stable      no (HTML can change)
+ @parse       url, title, thumbnail, img_src, content
+
+ @todo        rewrite to api
+"""


 from urllib import urlencode
@@ -1,4 +1,4 @@
-## Yacy (Web, Images, Videos, Music, Files)
+# Yacy (Web, Images, Videos, Music, Files)
 #
 # @website     http://yacy.net
 # @provide-api yes
@@ -1,13 +1,15 @@
-## Yahoo (Web)
-#
-# @website     https://search.yahoo.com/web
-# @provide-api yes (https://developer.yahoo.com/boss/search/),
-#              $0.80/1000 queries
-#
-# @using-api   no (because pricing)
-# @results     HTML (using search portal)
-# @stable      no (HTML can change)
-# @parse       url, title, content, suggestion
+"""
+ Yahoo (Web)
+
+ @website     https://search.yahoo.com/web
+ @provide-api yes (https://developer.yahoo.com/boss/search/),
+              $0.80/1000 queries
+
+ @using-api   no (because pricing)
+ @results     HTML (using search portal)
+ @stable      no (HTML can change)
+ @parse       url, title, content, suggestion
+"""

 from urllib import urlencode
 from urlparse import unquote

@@ -24,11 +26,11 @@ base_url = 'https://search.yahoo.com/'
 search_url = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}'

 # specific xpath variables
-results_xpath = '//div[@class="res"]'
+results_xpath = "//div[contains(concat(' ', normalize-space(@class), ' '), ' Sr ')]"
 url_xpath = './/h3/a/@href'
 title_xpath = './/h3/a'
-content_xpath = './/div[@class="abstr"]'
-suggestion_xpath = '//div[@id="satat"]//a'
+content_xpath = './/div[@class="compText aAbs"]'
+suggestion_xpath = "//div[contains(concat(' ', normalize-space(@class), ' '), ' AlsoTry ')]//a"

 # remove yahoo-specific tracking-url

@@ -91,11 +93,12 @@ def response(resp):
                         'content': content})

     # if no suggestion found, return results
-    if not dom.xpath(suggestion_xpath):
+    suggestions = dom.xpath(suggestion_xpath)
+    if not suggestions:
         return results

     # parse suggestion
-    for suggestion in dom.xpath(suggestion_xpath):
+    for suggestion in suggestions:
         # append suggestion
         results.append({'suggestion': extract_text(suggestion)})
@@ -23,15 +23,15 @@ paging = True
 language_support = True

 # search-url
-search_url = 'https://news.search.yahoo.com/search?{query}&b={offset}&fl=1&vl=lang_{lang}'  # noqa
+search_url = 'https://news.search.yahoo.com/search?{query}&b={offset}&{lang}=uh3_news_web_gs_1&pz=10&xargs=0&vl=lang_{lang}'  # noqa

 # specific xpath variables
-results_xpath = '//div[@class="res"]'
+results_xpath = '//ol[contains(@class,"searchCenterMiddle")]//li'
 url_xpath = './/h3/a/@href'
 title_xpath = './/h3/a'
-content_xpath = './/div[@class="abstr"]'
-publishedDate_xpath = './/span[@class="timestamp"]'
-suggestion_xpath = '//div[@id="satat"]//a'
+content_xpath = './/div[@class="compText"]'
+publishedDate_xpath = './/span[contains(@class,"tri")]'
+suggestion_xpath = '//div[contains(@class,"VerALSOTRY")]//a'

 # do search-request

@@ -48,11 +48,18 @@ def request(query, params):
                                       lang=language)

     # TODO required?
-    params['cookies']['sB'] = 'fl=1&vl=lang_{lang}&sh=1&rw=new&v=1'\
+    params['cookies']['sB'] = '"v=1&vm=p&fl=1&vl=lang_{lang}&sh=1&pn=10&rw=new'\
         .format(lang=language)
     return params


+def sanitize_url(url):
+    if ".yahoo.com/" in url:
+        return re.sub(u"\;\_ylt\=.+$", "", url)
+    else:
+        return url
+
+
 # get response from search-request
 def response(resp):
     results = []

@@ -61,13 +68,17 @@ def response(resp):

     # parse results
     for result in dom.xpath(results_xpath):
-        url = parse_url(extract_url(result.xpath(url_xpath), search_url))
+        urls = result.xpath(url_xpath)
+        if len(urls) != 1:
+            continue
+        url = sanitize_url(parse_url(extract_url(urls, search_url)))
         title = extract_text(result.xpath(title_xpath)[0])
         content = extract_text(result.xpath(content_xpath)[0])

         # parse publishedDate
         publishedDate = extract_text(result.xpath(publishedDate_xpath)[0])

+        # still useful ?
         if re.match("^[0-9]+ minute(s|) ago$", publishedDate):
             publishedDate = datetime.now() - timedelta(minutes=int(re.match(r'\d+', publishedDate).group()))  # noqa
         else:
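The new sanitize_url above strips the ';_ylt=' click-tracking suffix Yahoo appends to result links, while leaving third-party URLs untouched. A standalone sketch of its behavior:

import re

def sanitize_url(url):
    # drop the ';_ylt=' tracking suffix on yahoo.com links only
    if ".yahoo.com/" in url:
        return re.sub(u"\;\_ylt\=.+$", "", url)
    return url

print(sanitize_url('https://news.yahoo.com/some-story.html;_ylt=AwrC0F0'))
# https://news.yahoo.com/some-story.html
print(sanitize_url('https://example.org/page;_ylt=AwrC0F0'))  # unchanged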
@@ -1,7 +1,7 @@
-## Youtube (Videos)
+# Youtube (Videos)
 #
 # @website     https://www.youtube.com/
-# @provide-api yes (http://gdata-samples-youtube-search-py.appspot.com/)
+# @provide-api yes (https://developers.google.com/apis-explorer/#p/youtube/v3/youtube.search.list)
 #
 # @using-api   yes
 # @results     JSON

@@ -14,28 +14,29 @@ from dateutil import parser

 # engine dependent config
 categories = ['videos', 'music']
-paging = True
+paging = False
 language_support = True
+api_key = None

 # search-url
-base_url = 'https://gdata.youtube.com/feeds/api/videos'
-search_url = base_url + '?alt=json&{query}&start-index={index}&max-results=5'
+base_url = 'https://www.googleapis.com/youtube/v3/search'
+search_url = base_url + '?part=snippet&{query}&maxResults=20&key={api_key}'

 embedded_url = '<iframe width="540" height="304" ' +\
     'data-src="//www.youtube-nocookie.com/embed/{videoid}" ' +\
     'frameborder="0" allowfullscreen></iframe>'

+base_youtube_url = 'https://www.youtube.com/watch?v='
+
+
 # do search-request
 def request(query, params):
-    index = (params['pageno'] - 1) * 5 + 1
-
     params['url'] = search_url.format(query=urlencode({'q': query}),
-                                      index=index)
+                                      api_key=api_key)

     # add language tag if specified
     if params['language'] != 'all':
-        params['url'] += '&lr=' + params['language'].split('_')[0]
+        params['url'] += '&relevanceLanguage=' + params['language'].split('_')[0]

     return params

@@ -47,36 +48,25 @@ def response(resp):
     search_results = loads(resp.text)

     # return empty array if there are no results
-    if not 'feed' in search_results:
+    if 'items' not in search_results:
         return []

-    feed = search_results['feed']
-
     # parse results
-    for result in feed['entry']:
-        url = [x['href'] for x in result['link'] if x['type'] == 'text/html']
-
-        if not url:
-            continue
-
-        # remove tracking
-        url = url[0].replace('feature=youtube_gdata', '')
-        if url.endswith('&'):
-            url = url[:-1]
-
-        videoid = url[32:]
-
-        title = result['title']['$t']
+    for result in search_results['items']:
+        videoid = result['id']['videoId']
+
+        title = result['snippet']['title']
         content = ''
         thumbnail = ''

-        pubdate = result['published']['$t']
+        pubdate = result['snippet']['publishedAt']
         publishedDate = parser.parse(pubdate)

-        if 'media$thumbnail' in result['media$group']:
-            thumbnail = result['media$group']['media$thumbnail'][0]['url']
+        thumbnail = result['snippet']['thumbnails']['high']['url']

-        content = result['content']['$t']
+        content = result['snippet']['description']

+        url = base_youtube_url + videoid
+
         embedded = embedded_url.format(videoid=videoid)
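After the migration above, the engine talks to the YouTube Data API v3 and therefore needs an API key. A standalone sketch of the URL the new request() builds, with a placeholder key (the real one would come from the engine's configuration):

from urllib import urlencode  # Python 2, matching the engine

base_url = 'https://www.googleapis.com/youtube/v3/search'
search_url = base_url + '?part=snippet&{query}&maxResults=20&key={api_key}'

api_key = 'YOUR_API_KEY'  # placeholder, not a real credential
request_url = search_url.format(query=urlencode({'q': 'creative commons'}),
                                api_key=api_key)
request_url += '&relevanceLanguage=' + 'en'
print(request_url)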
@@ -0,0 +1,81 @@
+# Youtube (Videos)
+#
+# @website     https://www.youtube.com/
+# @provide-api yes (https://developers.google.com/apis-explorer/#p/youtube/v3/youtube.search.list)
+#
+# @using-api   no
+# @results     HTML
+# @stable      no
+# @parse       url, title, content, publishedDate, thumbnail, embedded
+
+from urllib import quote_plus
+from lxml import html
+from searx.engines.xpath import extract_text
+from searx.utils import list_get
+
+# engine dependent config
+categories = ['videos', 'music']
+paging = True
+language_support = False
+
+# search-url
+base_url = 'https://www.youtube.com/results'
+search_url = base_url + '?search_query={query}&page={page}'
+
+embedded_url = '<iframe width="540" height="304" ' +\
+    'data-src="//www.youtube-nocookie.com/embed/{videoid}" ' +\
+    'frameborder="0" allowfullscreen></iframe>'
+
+base_youtube_url = 'https://www.youtube.com/watch?v='
+
+# specific xpath variables
+results_xpath = "//ol/li/div[contains(@class, 'yt-lockup yt-lockup-tile yt-lockup-video vve-check')]"
+url_xpath = './/h3/a/@href'
+title_xpath = './/div[@class="yt-lockup-content"]/h3/a'
+content_xpath = './/div[@class="yt-lockup-content"]/div[@class="yt-lockup-description yt-ui-ellipsis yt-ui-ellipsis-2"]'
+
+
+# returns extract_text on the first result selected by the xpath or None
+def extract_text_from_dom(result, xpath):
+    r = result.xpath(xpath)
+    if len(r) > 0:
+        return extract_text(r[0])
+    return None
+
+
+# do search-request
+def request(query, params):
+    params['url'] = search_url.format(query=quote_plus(query),
+                                      page=params['pageno'])
+
+    return params
+
+
+# get response from search-request
+def response(resp):
+    results = []
+
+    dom = html.fromstring(resp.text)
+
+    # parse results
+    for result in dom.xpath(results_xpath):
+        videoid = list_get(result.xpath('@data-context-item-id'), 0)
+        if videoid is not None:
+            url = base_youtube_url + videoid
+            thumbnail = 'https://i.ytimg.com/vi/' + videoid + '/hqdefault.jpg'
+
+            title = extract_text_from_dom(result, title_xpath) or videoid
+            content = extract_text_from_dom(result, content_xpath)
+
+            embedded = embedded_url.format(videoid=videoid)
+
+            # append result
+            results.append({'url': url,
+                            'title': title,
+                            'content': content,
+                            'template': 'videos.html',
+                            'embedded': embedded,
+                            'thumbnail': thumbnail})
+
+    # return results
+    return results
@@ -0,0 +1,77 @@
+'''
+searx is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+searx is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with searx. If not, see < http://www.gnu.org/licenses/ >.
+
+(C) 2015 by Adam Tauber, <asciimoo@gmail.com>
+'''
+from sys import exit
+from searx import logger
+
+logger = logger.getChild('plugins')
+
+from searx.plugins import (https_rewrite,
+                           self_info,
+                           search_on_category_select,
+                           tracker_url_remover)
+
+required_attrs = (('name', str),
+                  ('description', str),
+                  ('default_on', bool))
+
+optional_attrs = (('js_dependencies', tuple),
+                  ('css_dependencies', tuple))
+
+
+class Plugin():
+    default_on = False
+    name = 'Default plugin'
+    description = 'Default plugin description'
+
+
+class PluginStore():
+
+    def __init__(self):
+        self.plugins = []
+
+    def __iter__(self):
+        for plugin in self.plugins:
+            yield plugin
+
+    def register(self, *plugins):
+        for plugin in plugins:
+            for plugin_attr, plugin_attr_type in required_attrs:
+                if not hasattr(plugin, plugin_attr) or not isinstance(getattr(plugin, plugin_attr), plugin_attr_type):
+                    logger.critical('missing attribute "{0}", cannot load plugin: {1}'.format(plugin_attr, plugin))
+                    exit(3)
+            for plugin_attr, plugin_attr_type in optional_attrs:
+                if not hasattr(plugin, plugin_attr) or not isinstance(getattr(plugin, plugin_attr), plugin_attr_type):
+                    setattr(plugin, plugin_attr, plugin_attr_type())
+            plugin.id = plugin.name.replace(' ', '_')
+            self.plugins.append(plugin)
+
+    def call(self, plugin_type, request, *args, **kwargs):
+        ret = True
+        for plugin in request.user_plugins:
+            if hasattr(plugin, plugin_type):
+                ret = getattr(plugin, plugin_type)(request, *args, **kwargs)
+                if not ret:
+                    break
+
+        return ret
+
+
+plugins = PluginStore()
+plugins.register(https_rewrite)
+plugins.register(self_info)
+plugins.register(search_on_category_select)
+plugins.register(tracker_url_remover)
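PluginStore.register above accepts any object (normally a module) that carries the required name/description/default_on attributes, backfills the optional dependency tuples, and derives an id from the name. A hypothetical sketch, assuming the PluginStore class from the hunk above is in scope; DummyPlugin and its attributes are illustrative only:

class DummyPlugin(object):
    # the three required attributes; their types are checked by register()
    name = 'Dummy plugin'
    description = 'Accepts every result'
    default_on = False

    @staticmethod
    def on_result(request, ctx):
        return True  # returning False would stop the plugin chain

store = PluginStore()
store.register(DummyPlugin)
print([plugin.id for plugin in store])  # ['Dummy_plugin']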
@@ -18,11 +18,22 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.
 import re
 from urlparse import urlparse
 from lxml import etree
-from os import listdir
+from os import listdir, environ
 from os.path import isfile, isdir, join
-from searx import logger
+from searx.plugins import logger
+from flask.ext.babel import gettext
+from searx import searx_dir
+
+
+name = "HTTPS rewrite"
+description = gettext('Rewrite HTTP links to HTTPS if possible')
+default_on = True
+
+if 'SEARX_HTTPS_REWRITE_PATH' in environ:
+    rules_path = environ['SEARX_rules_path']
+else:
+    rules_path = join(searx_dir, 'plugins/https_rules')

 logger = logger.getChild("https_rewrite")

 # https://gitweb.torproject.org/\

@@ -33,7 +44,7 @@ https_rules = []


 # load single ruleset from a xml file
-def load_single_https_ruleset(filepath):
+def load_single_https_ruleset(rules_path):
     ruleset = ()

     # init parser

@@ -41,7 +52,7 @@ def load_single_https_ruleset(filepath):

     # load and parse xml-file
     try:
-        tree = etree.parse(filepath, parser)
+        tree = etree.parse(rules_path, parser)
     except:
         # TODO, error message
         return ()

@@ -207,3 +218,13 @@ def https_url_rewrite(result):
             # target has matched, do not search over the other rules
             break
     return result
+
+
+def on_result(request, ctx):
+    result = ctx['result']
+    if result['parsed_url'].scheme == 'http':
+        https_url_rewrite(result)
+    return True
+
+
+load_https_rules(rules_path)
@@ -0,0 +1,23 @@
+'''
+searx is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+searx is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with searx. If not, see < http://www.gnu.org/licenses/ >.
+
+(C) 2015 by Adam Tauber, <asciimoo@gmail.com>
+'''
+from flask.ext.babel import gettext
+name = gettext('Search on category select')
+description = gettext('Perform search immediately if a category selected. '
+                      'Disable to select multiple categories. (JavaScript required)')
+default_on = True
+
+js_dependencies = ('js/search_on_category_select.js',)
@@ -0,0 +1,44 @@
+'''
+searx is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+searx is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with searx. If not, see < http://www.gnu.org/licenses/ >.
+
+(C) 2015 by Adam Tauber, <asciimoo@gmail.com>
+'''
+from flask.ext.babel import gettext
+import re
+name = "Self Informations"
+description = gettext('Displays your IP if the query is "ip" and your user agent if the query contains "user agent".')
+default_on = True
+
+
+# Self User Agent regex
+p = re.compile('.*user[ -]agent.*', re.IGNORECASE)
+
+
+# attach callback to the post search hook
+#  request: flask request object
+#  ctx: the whole local context of the pre search hook
+def post_search(request, ctx):
+    if ctx['search'].query == 'ip':
+        x_forwarded_for = request.headers.getlist("X-Forwarded-For")
+        if x_forwarded_for:
+            ip = x_forwarded_for[0]
+        else:
+            ip = request.remote_addr
+        ctx['search'].answers.clear()
+        ctx['search'].answers.add(ip)
+    elif p.match(ctx['search'].query):
+        ua = request.user_agent
+        ctx['search'].answers.clear()
+        ctx['search'].answers.add(ua)
+    return True
@@ -0,0 +1,44 @@
+'''
+searx is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+searx is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with searx. If not, see < http://www.gnu.org/licenses/ >.
+
+(C) 2015 by Adam Tauber, <asciimoo@gmail.com>
+'''
+
+from flask.ext.babel import gettext
+import re
+from urlparse import urlunparse
+
+regexes = {re.compile(r'utm_[^&]+&?'),
+           re.compile(r'(wkey|wemail)[^&]+&?'),
+           re.compile(r'&$')}
+
+name = gettext('Tracker URL remover')
+description = gettext('Remove trackers arguments from the returned URL')
+default_on = True
+
+
+def on_result(request, ctx):
+    query = ctx['result']['parsed_url'].query
+
+    if query == "":
+        return True
+
+    for reg in regexes:
+        query = reg.sub('', query)
+
+    if query != ctx['result']['parsed_url'].query:
+        ctx['result']['parsed_url'] = ctx['result']['parsed_url']._replace(query=query)
+        ctx['result']['url'] = urlunparse(ctx['result']['parsed_url'])
+
+    return True
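The plugin above rewrites each result URL by deleting utm_/wkey/wemail query arguments and any trailing ampersand. A standalone sketch of the same regex set applied to a parsed URL (example.com URL is fabricated):

import re
from urlparse import urlparse, urlunparse  # Python 2 stdlib, matching the plugin

regexes = {re.compile(r'utm_[^&]+&?'),
           re.compile(r'(wkey|wemail)[^&]+&?'),
           re.compile(r'&$')}

parsed_url = urlparse('https://example.com/article?utm_source=feed&id=42')
query = parsed_url.query
for reg in regexes:
    query = reg.sub('', query)

print(urlunparse(parsed_url._replace(query=query)))
# https://example.com/article?id=42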
@@ -1,20 +1,63 @@
 import requests
+from itertools import cycle
+from searx import settings


-the_http_adapter = requests.adapters.HTTPAdapter(pool_connections=100)
-the_https_adapter = requests.adapters.HTTPAdapter(pool_connections=100)
+class HTTPAdapterWithConnParams(requests.adapters.HTTPAdapter):
+
+    def __init__(self, pool_connections=requests.adapters.DEFAULT_POOLSIZE,
+                 pool_maxsize=requests.adapters.DEFAULT_POOLSIZE,
+                 max_retries=requests.adapters.DEFAULT_RETRIES,
+                 pool_block=requests.adapters.DEFAULT_POOLBLOCK,
+                 **conn_params):
+        if max_retries == requests.adapters.DEFAULT_RETRIES:
+            self.max_retries = requests.adapters.Retry(0, read=False)
+        else:
+            self.max_retries = requests.adapters.Retry.from_int(max_retries)
+        self.config = {}
+        self.proxy_manager = {}
+
+        super(requests.adapters.HTTPAdapter, self).__init__()
+
+        self._pool_connections = pool_connections
+        self._pool_maxsize = pool_maxsize
+        self._pool_block = pool_block
+        self._conn_params = conn_params
+
+        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block, **conn_params)
+
+    def __setstate__(self, state):
+        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
+        # because self.poolmanager uses a lambda function, which isn't pickleable.
+        self.proxy_manager = {}
+        self.config = {}
+
+        for attr, value in state.items():
+            setattr(self, attr, value)
+
+        self.init_poolmanager(self._pool_connections, self._pool_maxsize,
+                              block=self._pool_block, **self._conn_params)
+
+
+if settings['outgoing'].get('source_ips'):
+    http_adapters = cycle(HTTPAdapterWithConnParams(pool_connections=100, source_address=(source_ip, 0))
+                          for source_ip in settings['outgoing']['source_ips'])
+    https_adapters = cycle(HTTPAdapterWithConnParams(pool_connections=100, source_address=(source_ip, 0))
+                           for source_ip in settings['outgoing']['source_ips'])
+else:
+    http_adapters = cycle((HTTPAdapterWithConnParams(pool_connections=100), ))
+    https_adapters = cycle((HTTPAdapterWithConnParams(pool_connections=100), ))


 class SessionSinglePool(requests.Session):

     def __init__(self):
-        global the_https_adapter, the_http_adapter
         super(SessionSinglePool, self).__init__()

         # reuse the same adapters
         self.adapters.clear()
-        self.mount('https://', the_https_adapter)
-        self.mount('http://', the_http_adapter)
+        self.mount('https://', next(https_adapters))
+        self.mount('http://', next(http_adapters))

     def close(self):
         """Call super, but clear adapters since there are managed globaly"""

@@ -23,8 +66,10 @@ class SessionSinglePool(requests.Session):


 def request(method, url, **kwargs):
-    """same as requests/requests/api.py request(...) except it use SessionSinglePool"""
+    """same as requests/requests/api.py request(...) except it use SessionSinglePool and force proxies"""
+    global settings
     session = SessionSinglePool()
+    kwargs['proxies'] = settings['outgoing'].get('proxies', None)
    response = session.request(method=method, url=url, **kwargs)
     session.close()
     return response
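The rewritten poolrequests module above pre-builds one connection adapter per configured outgoing source IP and hands them out round-robin via itertools.cycle, so consecutive sessions spread requests over the available interfaces. A standalone sketch of that cycling pattern with hypothetical TEST-NET addresses standing in for the configured values:

from itertools import cycle

# hypothetical source addresses; real values would come from the
# outgoing source_ips setting
source_ips = ['192.0.2.10', '192.0.2.11']

# stand-ins for the HTTPAdapterWithConnParams instances built above
adapters = cycle('adapter bound to ' + ip for ip in source_ips)

# each new SessionSinglePool mounts next(adapters), alternating interfaces
for _ in range(4):
    print(next(adapters))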
@ -23,6 +23,7 @@ from operator import itemgetter
|
||||||
from Queue import Queue
|
from Queue import Queue
|
||||||
from time import time
|
from time import time
|
||||||
from urlparse import urlparse, unquote
|
from urlparse import urlparse, unquote
|
||||||
|
from searx import settings
|
||||||
from searx.engines import (
|
from searx.engines import (
|
||||||
categories, engines
|
categories, engines
|
||||||
)
|
)
|
||||||
|
@@ -205,6 +206,10 @@ def score_results(results):
         # if there is no duplicate found, append result
         else:
             res['score'] = score
+            # if the result has no scheme, use http as default
+            if res['parsed_url'].scheme == '':
+                res['parsed_url'] = res['parsed_url']._replace(scheme="http")
+
             results.append(res)
 
     results = sorted(results, key=itemgetter('score'), reverse=True)
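ParseResult is a namedtuple, so _replace returns a copy with one field swapped; this is how a scheme-relative result URL from an engine gets a usable http:// default. A quick sketch (the urlparse import is Python 2, matching this codebase):

    from urlparse import urlparse  # Python 2; urllib.parse on Python 3

    parsed = urlparse('//example.org/page')   # engine returned a scheme-less URL
    if parsed.scheme == '':
        parsed = parsed._replace(scheme='http')

    print(parsed.geturl())  # http://example.org/page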
@@ -329,8 +334,8 @@ class Search(object):
         self.blocked_engines = get_blocked_engines(engines, request.cookies)
 
         self.results = []
-        self.suggestions = []
-        self.answers = []
+        self.suggestions = set()
+        self.answers = set()
         self.infoboxes = []
         self.request_data = {}
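Switching suggestions and answers from lists to sets makes deduplication automatic when several engines return the same string, at the cost of ordering, which does not matter here. For illustration:

    suggestions = set()
    for s in ('searx', 'searx', 'search engines'):  # duplicates from two engines
        suggestions.add(s)
    print(sorted(suggestions))  # ['search engines', 'searx']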
@@ -382,9 +387,19 @@ class Search(object):
         # otherwise, using defined categories to
         # calculate which engines should be used
         else:
-            # set used categories
+            # set categories/engines
+            load_default_categories = True
             for pd_name, pd in self.request_data.items():
-                if pd_name.startswith('category_'):
+                if pd_name == 'categories':
+                    self.categories.extend(categ for categ in map(unicode.strip, pd.split(',')) if categ in categories)
+                elif pd_name == 'engines':
+                    pd_engines = [{'category': engines[engine].categories[0],
+                                   'name': engine}
+                                  for engine in map(unicode.strip, pd.split(',')) if engine in engines]
+                    if pd_engines:
+                        self.engines.extend(pd_engines)
+                        load_default_categories = False
+                elif pd_name.startswith('category_'):
                     category = pd_name[9:]
 
                     # if category is not found in list, skip
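The new 'engines' branch lets a request name engines directly instead of whole categories: the comma-separated value is split, unknown names are silently dropped, and each surviving engine is tagged with its first category. A sketch with stand-in data (known_engines replaces the real searx.engines registry):

    known_engines = {'wikipedia': ['general'], 'bing images': ['images']}

    pd = u'wikipedia, bing images, no-such-engine'
    selected = [{'category': known_engines[name][0], 'name': name}
                for name in (part.strip() for part in pd.split(','))
                if name in known_engines]

    print(selected)
    # [{'category': 'general', 'name': 'wikipedia'},
    #  {'category': 'images', 'name': 'bing images'}]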
@@ -398,6 +413,12 @@ class Search(object):
                     # remove category from list if property is set to 'off'
                     self.categories.remove(category)
 
+            if not load_default_categories:
+                if not self.categories:
+                    self.categories = list(set(engine['category']
+                                               for engine in self.engines))
+                return
+
         # if no category is specified for this search,
         # using user-defined default-configuration which
         # (is stored in cookie)
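When engines were selected explicitly, load_default_categories stays False, the category list is derived from the selection if none was given, and the early return skips the cookie-based defaults below. The derivation in isolation:

    engines_selected = [{'category': 'general', 'name': 'wikipedia'},
                        {'category': 'images', 'name': 'bing images'}]
    categories = list(set(engine['category'] for engine in engines_selected))
    print(sorted(categories))  # ['general', 'images']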
@@ -429,9 +450,6 @@ class Search(object):
         requests = []
         results_queue = Queue()
         results = {}
-        suggestions = set()
-        answers = set()
-        infoboxes = []
 
         # increase number of searches
         number_of_searches += 1
@@ -462,12 +480,17 @@ class Search(object):
             request_params['category'] = selected_engine['category']
             request_params['started'] = time()
             request_params['pageno'] = self.pageno
-            request_params['language'] = self.lang
+
+            if hasattr(engine, 'language') and engine.language:
+                request_params['language'] = engine.language
+            else:
+                request_params['language'] = self.lang
+
             try:
                 # 0 = None, 1 = Moderate, 2 = Strict
-                request_params['safesearch'] = int(request.cookies.get('safesearch', 1))
-            except ValueError:
-                request_params['safesearch'] = 1
+                request_params['safesearch'] = int(request.cookies.get('safesearch'))
+            except Exception:
+                request_params['safesearch'] = settings['search']['safe_search']
 
             # update request parameters dependent on
             # search-engine (contained in engines folder)
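The widened except matters here: with no default, cookies.get('safesearch') returns None when the cookie is absent, and int(None) raises TypeError rather than ValueError, so the old handler never fired. Either way the value now falls back to the instance-wide safe_search setting from settings.yml. A sketch of the logic (get_safesearch is a hypothetical helper):

    def get_safesearch(cookies, default):
        try:
            return int(cookies.get('safesearch'))
        except Exception:  # TypeError (missing cookie) or ValueError (malformed)
            return default

    print(get_safesearch({}, 0))                   # 0 -> no cookie, use setting
    print(get_safesearch({'safesearch': '2'}, 0))  # 2 -> cookie wins
    print(get_safesearch({'safesearch': 'xx'}, 0)) # 0 -> bad cookie, use setting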
@@ -511,7 +534,7 @@ class Search(object):
                                selected_engine['name']))
 
         if not requests:
-            return results, suggestions, answers, infoboxes
+            return self
         # send all search-request
         threaded_requests(requests)
@@ -519,17 +542,17 @@ class Search(object):
             engine_name, engine_results = results_queue.get_nowait()
 
             # TODO type checks
-            [suggestions.add(x['suggestion'])
+            [self.suggestions.add(x['suggestion'])
              for x in list(engine_results)
              if 'suggestion' in x
              and engine_results.remove(x) is None]
 
-            [answers.add(x['answer'])
+            [self.answers.add(x['answer'])
              for x in list(engine_results)
              if 'answer' in x
              and engine_results.remove(x) is None]
 
-            infoboxes.extend(x for x in list(engine_results)
+            self.infoboxes.extend(x for x in list(engine_results)
                              if 'infobox' in x
                              and engine_results.remove(x) is None)
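These comprehensions rely on a side-effect idiom: list(engine_results) iterates over a copy, and 'engine_results.remove(x) is None' is always true (remove returns None), so matching items are collected and drained from the original list in one pass. The idiom in isolation:

    items = [{'answer': '42'}, {'url': 'http://example.org/'}]
    answers = set()

    [answers.add(x['answer'])
     for x in list(items)          # iterate a copy...
     if 'answer' in x
     and items.remove(x) is None]  # ...while removing matches from the original

    print(sorted(answers))  # ['42']
    print(items)            # [{'url': 'http://example.org/'}]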
@@ -541,16 +564,16 @@ class Search(object):
             engines[engine_name].stats['result_count'] += len(engine_results)
 
         # score results and remove duplications
-        results = score_results(results)
+        self.results = score_results(results)
 
         # merge infoboxes according to their ids
-        infoboxes = merge_infoboxes(infoboxes)
+        self.infoboxes = merge_infoboxes(self.infoboxes)
 
         # update engine stats, using calculated score
-        for result in results:
+        for result in self.results:
             for res_engine in result['engines']:
                 engines[result['engine']]\
                     .stats['score_count'] += result['score']
 
         # return results, suggestions, answers and infoboxes
-        return results, suggestions, answers, infoboxes
+        return self
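With this change Search.search() no longer hands back a (results, suggestions, answers, infoboxes) tuple; callers keep the Search instance and read those collections as attributes. A compact sketch of the contract, with a hypothetical Pipeline class in place of Search:

    class Pipeline(object):

        def __init__(self):
            self.results = []
            self.suggestions = set()

        def run(self, raw):
            for item in raw:
                if 'suggestion' in item:
                    self.suggestions.add(item['suggestion'])
                else:
                    self.results.append(item)
            return self  # state travels on the object, not in a tuple

    p = Pipeline().run([{'url': 'http://example.org/'}, {'suggestion': 'searx'}])
    print(len(p.results), sorted(p.suggestions))  # 1 ['searx']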