diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..542e9b0
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,6 @@
+include README.md
+include MANIFEST.in
+include setup.py
+include scripts/google
+include requirements.txt
+include googlesearch/user_agents.txt.gz
diff --git a/PKG-INFO b/PKG-INFO
new file mode 100644
index 0000000..82c7e2d
--- /dev/null
+++ b/PKG-INFO
@@ -0,0 +1,18 @@
+Metadata-Version: 1.1
+Name: google
+Version: 3.0.0
+Summary: Python bindings to the Google search engine.
+Home-page: http://breakingcode.wordpress.com/
+Author: Mario Vilas
+Author-email: mvilas@gmail.com
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Environment :: Console
+Classifier: Programming Language :: Python
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires: beautifulsoup4
+Provides: googlesearch
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..d3b8b30
--- /dev/null
+++ b/README.md
@@ -0,0 +1,19 @@
+googlesearch
+============
+
+Google search from Python.
+
+https://python-googlesearch.readthedocs.io/en/latest/
+
+Usage example
+-------------
+
+    # Get the first 20 hits for: "Breaking Code" WordPress blog
+    from googlesearch import search
+    for url in search('"Breaking Code" WordPress blog', stop=20):
+        print(url)
+
+Installing
+----------
+
+    pip install google
diff --git a/google.egg-info/PKG-INFO b/google.egg-info/PKG-INFO
new file mode 100644
index 0000000..82c7e2d
--- /dev/null
+++ b/google.egg-info/PKG-INFO
@@ -0,0 +1,18 @@
+Metadata-Version: 1.1
+Name: google
+Version: 3.0.0
+Summary: Python bindings to the Google search engine.
+Home-page: http://breakingcode.wordpress.com/
+Author: Mario Vilas
+Author-email: mvilas@gmail.com
+License: UNKNOWN
+Description: UNKNOWN
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Environment :: Console
+Classifier: Programming Language :: Python
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires: beautifulsoup4
+Provides: googlesearch
diff --git a/google.egg-info/SOURCES.txt b/google.egg-info/SOURCES.txt
new file mode 100644
index 0000000..27d29d2
--- /dev/null
+++ b/google.egg-info/SOURCES.txt
@@ -0,0 +1,13 @@
+MANIFEST.in
+README.md
+requirements.txt
+setup.cfg
+setup.py
+google.egg-info/PKG-INFO
+google.egg-info/SOURCES.txt
+google.egg-info/dependency_links.txt
+google.egg-info/requires.txt
+google.egg-info/top_level.txt
+googlesearch/__init__.py
+googlesearch/user_agents.txt.gz
+scripts/google
\ No newline at end of file
diff --git a/google.egg-info/dependency_links.txt b/google.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/google.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/google.egg-info/requires.txt b/google.egg-info/requires.txt
new file mode 100644
index 0000000..c1f5f71
--- /dev/null
+++ b/google.egg-info/requires.txt
@@ -0,0 +1 @@
+beautifulsoup4
diff --git a/google.egg-info/top_level.txt b/google.egg-info/top_level.txt
new file mode 100644
index 0000000..6b47fbd
--- /dev/null
+++ b/google.egg-info/top_level.txt
@@ -0,0 +1 @@
+googlesearch
diff --git a/googlesearch/__init__.py b/googlesearch/__init__.py
new file mode 100644
index 0000000..6182636
--- /dev/null
+++ b/googlesearch/__init__.py
@@ -0,0 +1,376 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009-2020, Mario Vilas
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#     * Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#     * Neither the name of the copyright holder nor the names of its
+#       contributors may be used to endorse or promote products derived from
+#       this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import random
+import sys
+import time
+import ssl
+
+if sys.version_info[0] > 2:
+    from http.cookiejar import LWPCookieJar
+    from urllib.request import Request, urlopen
+    from urllib.parse import quote_plus, urlparse, parse_qs
+else:
+    from cookielib import LWPCookieJar
+    from urllib import quote_plus
+    from urllib2 import Request, urlopen
+    from urlparse import urlparse, parse_qs
+
+try:
+    from bs4 import BeautifulSoup
+    is_bs4 = True
+except ImportError:
+    from BeautifulSoup import BeautifulSoup
+    is_bs4 = False
+
+__all__ = [
+
+    # Main search function.
+    'search',
+
+    # Shortcut for "get lucky" search.
+    'lucky',
+
+    # Miscellaneous utility functions.
+    'get_random_user_agent', 'get_tbs',
+]
+
+# URL templates to make Google searches.
+url_home = "https://www.google.%(tld)s/"
+url_search = "https://www.google.%(tld)s/search?hl=%(lang)s&q=%(query)s&" \
+             "btnG=Google+Search&tbs=%(tbs)s&safe=%(safe)s&" \
+             "cr=%(country)s"
+url_next_page = "https://www.google.%(tld)s/search?hl=%(lang)s&q=%(query)s&" \
+                "start=%(start)d&tbs=%(tbs)s&safe=%(safe)s&" \
+                "cr=%(country)s"
+url_search_num = "https://www.google.%(tld)s/search?hl=%(lang)s&q=%(query)s&" \
+                 "num=%(num)d&btnG=Google+Search&tbs=%(tbs)s&safe=%(safe)s&" \
+                 "cr=%(country)s"
+url_next_page_num = "https://www.google.%(tld)s/search?hl=%(lang)s&" \
+                    "q=%(query)s&num=%(num)d&start=%(start)d&tbs=%(tbs)s&" \
+                    "safe=%(safe)s&cr=%(country)s"
+url_parameters = (
+    'hl', 'q', 'num', 'btnG', 'start', 'tbs', 'safe', 'cr')
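+
+# For example (illustrative values), url_search % {'tld': 'com', 'lang': 'en',
+# 'query': 'foo', 'tbs': '0', 'safe': 'off', 'country': ''} evaluates to:
+# https://www.google.com/search?hl=en&q=foo&btnG=Google+Search&tbs=0&safe=off&cr=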
+
+# Cookie jar. Stored at the user's home folder.
+# If the cookie jar is inaccessible, the errors are ignored.
+home_folder = os.getenv('HOME')
+if not home_folder:
+    home_folder = os.getenv('USERPROFILE')  # Windows home folder.
+    if not home_folder:
+        home_folder = '.'   # Use the current folder on error.
+cookie_jar = LWPCookieJar(os.path.join(home_folder, '.google-cookie'))
+try:
+    cookie_jar.load()
+except Exception:
+    pass
+
+# Default user agent, unless instructed by the user to change it.
+USER_AGENT = 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)'
+
+# Load the list of valid user agents from the install folder.
+# The search order is:
+#   * user_agents.txt.gz
+#   * user_agents.txt
+#   * default user agent
+try:
+    install_folder = os.path.abspath(os.path.split(__file__)[0])
+    try:
+        user_agents_file = os.path.join(install_folder, 'user_agents.txt.gz')
+        import gzip
+        fp = gzip.open(user_agents_file, 'rb')
+        try:
+            # Decode the bytes read from the gzip stream into text.
+            user_agents_list = [
+                _.strip().decode('utf-8') for _ in fp.readlines()]
+        finally:
+            fp.close()
+            del fp
+    except Exception:
+        user_agents_file = os.path.join(install_folder, 'user_agents.txt')
+        with open(user_agents_file) as fp:
+            user_agents_list = [_.strip() for _ in fp.readlines()]
+except Exception:
+    user_agents_list = [USER_AGENT]
+
+
+# Get a random user agent.
+def get_random_user_agent():
+    """
+    Get a random user agent string.
+
+    :rtype: str
+    :return: Random user agent string.
+    """
+    return random.choice(user_agents_list)
+
+
+# Helper function to format the tbs parameter.
+def get_tbs(from_date, to_date):
+    """
+    Helper function to format the tbs parameter.
+
+    :param datetime.date from_date: Python date object.
+    :param datetime.date to_date: Python date object.
+
+    :rtype: str
+    :return: Dates encoded in tbs format.
+    """
+    from_date = from_date.strftime('%m/%d/%Y')
+    to_date = to_date.strftime('%m/%d/%Y')
+    return 'cdr:1,cd_min:%(from_date)s,cd_max:%(to_date)s' % vars()
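+
+# Example (illustrative dates):
+#   >>> import datetime
+#   >>> get_tbs(datetime.date(2020, 1, 1), datetime.date(2020, 6, 30))
+#   'cdr:1,cd_min:01/01/2020,cd_max:06/30/2020'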
+
+
+# Request the given URL and return the response page, using the cookie jar.
+# If the cookie jar is inaccessible, the errors are ignored.
+def get_page(url, user_agent=None, verify_ssl=True):
+    """
+    Request the given URL and return the response page, using the cookie jar.
+
+    :param str url: URL to retrieve.
+    :param str user_agent: User agent for the HTTP requests.
+        Use None for the default.
+    :param bool verify_ssl: Verify the SSL certificate to prevent
+        traffic interception attacks. Defaults to True.
+
+    :rtype: str
+    :return: Web page retrieved for the given URL.
+
+    :raises IOError: An exception is raised on error.
+    :raises urllib2.URLError: An exception is raised on error.
+    :raises urllib2.HTTPError: An exception is raised on error.
+    """
+    if user_agent is None:
+        user_agent = USER_AGENT
+    request = Request(url)
+    request.add_header('User-Agent', user_agent)
+    cookie_jar.add_cookie_header(request)
+    if verify_ssl:
+        response = urlopen(request)
+    else:
+        context = ssl._create_unverified_context()
+        response = urlopen(request, context=context)
+    cookie_jar.extract_cookies(response, request)
+    html = response.read()
+    response.close()
+    try:
+        cookie_jar.save()
+    except Exception:
+        pass
+    return html
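+
+# Example (illustrative): fetch the Google home page, optionally with a
+# random user agent from the bundled list.
+#   html = get_page(url_home % {'tld': 'com'}, get_random_user_agent())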
+
+
+# Filter links found in the Google result pages HTML code.
+# Returns None if the link doesn't yield a valid result.
+def filter_result(link):
+    try:
+
+        # Decode hidden URLs.
+        if link.startswith('/url?'):
+            o = urlparse(link, 'http')
+            link = parse_qs(o.query)['q'][0]
+
+        # Valid results are absolute URLs not pointing to a Google domain,
+        # such as images.google.com or googleusercontent.com.
+        # TODO this could be improved!
+        o = urlparse(link, 'http')
+        if o.netloc and 'google' not in o.netloc:
+            return link
+
+    # On error, return None.
+    except Exception:
+        pass
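+
+# Example behaviors (illustrative URLs):
+#   filter_result('/url?q=http://example.com/&sa=U')  # -> 'http://example.com/'
+#   filter_result('https://maps.google.com/')         # -> None (Google domain)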
+
+
+# Returns a generator that yields URLs.
+def search(query, tld='com', lang='en', tbs='0', safe='off', num=10, start=0,
+           stop=None, pause=2.0, country='', extra_params=None,
+           user_agent=None, verify_ssl=True):
+    """
+    Search the given query string using Google.
+
+    :param str query: Query string. Must NOT be url-encoded.
+    :param str tld: Top level domain.
+    :param str lang: Language.
+    :param str tbs: Time limits (e.g. "qdr:h" => last hour,
+        "qdr:d" => last 24 hours, "qdr:m" => last month).
+    :param str safe: Safe search.
+    :param int num: Number of results per page.
+    :param int start: First result to retrieve.
+    :param int stop: Last result to retrieve.
+        Use None to keep searching forever.
+    :param float pause: Lapse to wait between HTTP requests.
+        A lapse too long will make the search slow, but a lapse too short may
+        cause Google to block your IP. Your mileage may vary!
+    :param str country: Country or region to focus the search on. Similar to
+        changing the TLD, but does not yield exactly the same results.
+        Only Google knows why...
+    :param dict extra_params: A dictionary of extra HTTP GET
+        parameters, which must be URL encoded. For example if you don't want
+        Google to filter similar results you can set the extra_params to
+        {'filter': '0'} which will append '&filter=0' to every query.
+    :param str user_agent: User agent for the HTTP requests.
+        Use None for the default.
+    :param bool verify_ssl: Verify the SSL certificate to prevent
+        traffic interception attacks. Defaults to True.
+
+    :rtype: generator of str
+    :return: Generator (iterator) that yields found URLs.
+        If the stop parameter is None the iterator will loop forever.
+    """
+    # Set of hashes for the results found.
+    # This is used to avoid repeated results.
+    hashes = set()
+
+    # Count the number of links yielded.
+    count = 0
+
+    # Prepare the search string.
+    query = quote_plus(query)
+
+    # If no extra_params is given, create an empty dictionary here.
+    # (Using a mutable dictionary as a default parameter value
+    # would be a classic Python pitfall.)
+    if not extra_params:
+        extra_params = {}
+
+    # Check extra_params for overlap with the built-in GET parameters.
+    for builtin_param in url_parameters:
+        if builtin_param in extra_params:
+            raise ValueError(
+                'GET parameter "%s" overlaps with a built-in GET parameter'
+                % builtin_param
+            )
+
+    # Grab the cookie from the home page.
+    get_page(url_home % vars(), user_agent, verify_ssl)
+
+    # Prepare the URL of the first request.
+    if start:
+        if num == 10:
+            url = url_next_page % vars()
+        else:
+            url = url_next_page_num % vars()
+    else:
+        if num == 10:
+            url = url_search % vars()
+        else:
+            url = url_search_num % vars()
+
+    # Loop until we reach the maximum result, if any (otherwise, loop forever).
+    while not stop or count < stop:
+
+        # Remember the last count to detect the end of results.
+        last_count = count
+
+        # Append extra GET parameters to the URL.
+        # This is done on every iteration because we're
+        # rebuilding the entire URL at the end of this loop.
+        for k, v in extra_params.items():
+            k = quote_plus(k)
+            v = quote_plus(v)
+            url = url + ('&%s=%s' % (k, v))
+
+        # Sleep between requests.
+        # Keeps Google from banning you for making too many requests.
+        time.sleep(pause)
+
+        # Request the Google Search results page.
+        html = get_page(url, user_agent, verify_ssl)
+
+        # Parse the response and get every anchored URL.
+        if is_bs4:
+            soup = BeautifulSoup(html, 'html.parser')
+        else:
+            soup = BeautifulSoup(html)
+        try:
+            anchors = soup.find(id='search').findAll('a')
+            # Sometimes (depending on the User-Agent) there is
+            # no id "search" in the HTML response...
+        except AttributeError:
+            # Remove links of the top bar.
+            gbar = soup.find(id='gbar')
+            if gbar:
+                gbar.clear()
+            anchors = soup.findAll('a')
+
+        # Process every anchored URL.
+        for a in anchors:
+
+            # Get the URL from the anchor tag.
+            try:
+                link = a['href']
+            except KeyError:
+                continue
+
+            # Filter invalid links and links pointing to Google itself.
+            link = filter_result(link)
+            if not link:
+                continue
+
+            # Discard repeated results.
+            h = hash(link)
+            if h in hashes:
+                continue
+            hashes.add(h)
+
+            # Yield the result.
+            yield link
+
+            # Increase the results counter.
+            # If we reached the limit, stop.
+            count += 1
+            if stop and count >= stop:
+                return
+
+        # End if there are no more results.
+        # XXX TODO review this logic, not sure if this is still true!
+        if last_count == count:
+            break
+
+        # Prepare the URL for the next request.
+        start += num
+        if num == 10:
+            url = url_next_page % vars()
+        else:
+            url = url_next_page_num % vars()
+
+
+# Shortcut to single-item search.
+# Evaluates the iterator to return the single URL as a string.
+def lucky(*args, **kwargs):
+    """
+    Shortcut to single-item search.
+
+    Same arguments as the main search function, but the return value changes.
+
+    :rtype: str
+    :return: URL found by Google.
+    """
+    return next(search(*args, **kwargs))
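+
+# Usage sketch (query strings are illustrative):
+#
+#   from googlesearch import search, lucky
+#
+#   # Iterate over the first ten results, pausing 2 seconds between requests.
+#   for url in search('site:python.org tutorial', stop=10, pause=2.0):
+#       print(url)
+#
+#   # Or just grab the first hit.
+#   first_url = lucky('python unittest documentation')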
diff --git a/googlesearch/user_agents.txt.gz b/googlesearch/user_agents.txt.gz
new file mode 100644
index 0000000..8890f51
Binary files /dev/null and b/googlesearch/user_agents.txt.gz differ
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..08a2d6d
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1 @@
+beautifulsoup4>=4.0
diff --git a/scripts/google b/scripts/google
new file mode 100755
index 0000000..eca9185
--- /dev/null
+++ b/scripts/google
@@ -0,0 +1,118 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009-2020, Mario Vilas
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#     * Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#     * Neither the name of the copyright holder nor the names of its
+#       contributors may be used to endorse or promote products derived from
+#       this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import sys
+
+from googlesearch import search, get_random_user_agent
+
+# TODO port to argparse
+from optparse import OptionParser, IndentedHelpFormatter
+
+
+class BannerHelpFormatter(IndentedHelpFormatter):
+
+    "Just a small tweak to optparse to be able to print a banner."
+
+    def __init__(self, banner, *argv, **argd):
+        self.banner = banner
+        IndentedHelpFormatter.__init__(self, *argv, **argd)
+
+    def format_usage(self, usage):
+        msg = IndentedHelpFormatter.format_usage(self, usage)
+        return '%s\n%s' % (self.banner, msg)
+
+
+def main():
+
+    # Parse the command line arguments.
+    formatter = BannerHelpFormatter(
+        "Python script to use the Google search engine\n"
+        "By Mario Vilas (mvilas at gmail dot com)\n"
+        "https://github.com/MarioVilas/googlesearch\n"
+    )
+    parser = OptionParser(formatter=formatter)
+    parser.set_usage("%prog [options] query")
+    parser.add_option(
+        '--tld', metavar='TLD', type='string', default='com',
+        help="top level domain to use [default: com]")
+    parser.add_option(
+        '--lang', metavar='LANGUAGE', type='string', default='en',
+        help="produce results in the given language [default: en]")
+    parser.add_option(
+        '--tbs', metavar='TBS', type='string', default='0',
+        help="produce results from period [default: 0]")
+    parser.add_option(
+        '--safe', metavar='SAFE', type='string', default='off',
+        help="kids safe search [default: off]")
+    parser.add_option(
+        '--country', metavar='COUNTRY', type='string', default='',
+        help="region to restrict search on [default: not restricted]")
+    parser.add_option(
+        '--num', metavar='NUMBER', type='int', default=10,
+        help="number of results per page [default: 10]")
+    parser.add_option(
+        '--start', metavar='NUMBER', type='int', default=0,
+        help="first result to retrieve [default: 0]")
+    parser.add_option(
+        '--stop', metavar='NUMBER', type='int', default=0,
+        help="last result to retrieve [default: unlimited]")
+    parser.add_option(
+        '--pause', metavar='SECONDS', type='float', default=2.0,
+        help="pause between HTTP requests [default: 2.0]")
+    parser.add_option(
+        '--rua', action='store_true', default=False,
+        help="randomize the User-Agent [default: no]")
+    parser.add_option(
+        '--insecure', dest="verify_ssl", action='store_false', default=True,
+        help="do not verify SSL certificates [default: verify]")
+    (options, args) = parser.parse_args()
+    query = ' '.join(args)
+    if not query:
+        parser.print_help()
+        sys.exit(2)
+    params = [
+        (k, v) for (k, v) in options.__dict__.items()
+        if not k.startswith('_')]
+    params = dict(params)
+
+    # Randomize the user agent if requested.
+    if 'rua' in params and params.pop('rua'):
+        params['user_agent'] = get_random_user_agent()
+
+    # Run the query.
+    for url in search(query, **params):
+        print(url)
+        try:
+            sys.stdout.flush()
+        except Exception:
+            pass
+
+
+if __name__ == '__main__':
+    main()
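+
+# Example invocations, assuming setup.py has installed this script as
+# "google" on the PATH (queries are illustrative):
+#
+#   google --stop 20 '"Breaking Code" WordPress blog'
+#   google --tld es --lang es --num 20 --pause 3.0 python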
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..adf5ed7
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,7 @@
+[bdist_wheel]
+universal = 1
+
+[egg_info]
+tag_build = 
+tag_date = 0
+
diff --git a/setup.py b/setup.py
new file mode 100755
index 0000000..ea3697a
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009-2020, Mario Vilas
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+#     * Redistributions of source code must retain the above copyright notice,
+#       this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#     * Neither the name of the copyright holder nor the names of its
+#       contributors may be used to endorse or promote products derived from
+#       this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from os import chdir
+from os.path import abspath, join, split
+
+# Make sure we are standing in the correct directory.
+# Old versions of distutils didn't take care of this.
+here = split(abspath(__file__))[0]
+chdir(here)
+
+# Package metadata.
+metadata = dict(
+    name='google',
+    provides=['googlesearch'],
+    requires=['beautifulsoup4'],
+    packages=['googlesearch'],
+    scripts=[join('scripts', 'google')],
+    package_data={'googlesearch': ['user_agents.txt.gz']},
+    include_package_data=True,
+    version="3.0.0",
+    description="Python bindings to the Google search engine.",
+    author="Mario Vilas",
+    author_email="mvilas@gmail.com",
+    url="http://breakingcode.wordpress.com/",
+    classifiers=[
+        "Development Status :: 5 - Production/Stable",
+        "Intended Audience :: Developers",
+        "License :: OSI Approved :: BSD License",
+        "Environment :: Console",
+        "Programming Language :: Python",
+        "Topic :: Software Development :: Libraries :: Python Modules",
+    ],
+)
+
+# Prefer setuptools over the old distutils.
+# If setuptools is available, use install_requires.
+try:
+    from setuptools import setup
+    metadata['install_requires'] = metadata['requires']
+except ImportError:
+    from distutils.core import setup
+
+# Run the setup script.
+setup(**metadata)
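+
+# To build and install from a source checkout, one option is:
+#   pip install .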